Diffstat (limited to 'src/kube2msb')
-rw-r--r--  src/kube2msb/Makefile | 11
-rw-r--r--  src/kube2msb/kube2msb.go | 288
-rw-r--r--  src/kube2msb/kube_work.go | 195
-rw-r--r--  src/kube2msb/msb_client.go | 129
-rw-r--r--  src/kube2msb/msb_work.go | 97
-rw-r--r--  src/kube2msb/types.go | 136
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE | 21
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md | 352
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go | 248
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go | 182
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go | 44
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go | 34
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go | 26
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go | 161
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go | 94
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go | 12
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go | 20
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go | 12
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go | 21
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go | 27
-rw-r--r--  src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go | 124
-rw-r--r--  src/kube2msb/vendor/github.com/beorn7/perks/LICENSE | 20
-rw-r--r--  src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt | 2388
-rw-r--r--  src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go | 292
-rw-r--r--  src/kube2msb/vendor/github.com/blang/semver/LICENSE | 22
-rw-r--r--  src/kube2msb/vendor/github.com/blang/semver/README.md | 142
-rw-r--r--  src/kube2msb/vendor/github.com/blang/semver/json.go | 23
-rw-r--r--  src/kube2msb/vendor/github.com/blang/semver/semver.go | 395
-rw-r--r--  src/kube2msb/vendor/github.com/blang/semver/sort.go | 28
-rw-r--r--  src/kube2msb/vendor/github.com/blang/semver/sql.go | 30
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE | 202
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE | 5
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go | 7
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go | 159
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go | 14
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go | 29
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go | 126
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go | 112
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go | 135
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go | 51
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go | 82
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go | 24
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go | 67
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go | 67
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go | 153
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go | 99
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go | 55
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go | 165
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go | 29
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go | 416
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go | 846
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go | 44
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go | 3
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go | 67
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go | 688
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go | 88
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go | 109
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go | 188
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE | 191
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go | 179
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/LICENSE | 202
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/NOTICE | 5
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md | 39
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go | 157
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go | 96
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go | 49
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go | 25
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go | 68
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go | 39
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go | 240
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go | 171
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go | 65
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/health/README.md | 11
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/health/health.go | 127
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md | 13
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go | 21
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go | 27
-rw-r--r--  src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go | 15
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE | 13
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go | 151
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go | 37
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go | 341
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go | 297
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go | 202
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go | 509
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go | 419
-rw-r--r--  src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go | 148
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/LICENSE | 202
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go | 139
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go | 155
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go | 42
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/digest/set.go | 245
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go | 44
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go | 334
-rw-r--r--  src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go | 124
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md | 67
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code | 191
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs | 425
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS | 27
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/README.md | 18
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/circle.yml | 11
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/duration.go | 33
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/size.go | 95
-rw-r--r--  src/kube2msb/vendor/github.com/docker/go-units/ulimit.go | 118
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md | 163
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE | 22
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/README.md | 74
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile | 1
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh | 10
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go | 123
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go | 103
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go | 53
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go | 30
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/container.go | 361
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go | 202
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh | 2
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go | 162
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go | 52
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go | 196
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go | 163
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go | 26
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh | 9
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go | 248
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go | 31
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go | 32
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go | 45
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go | 26
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go | 114
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go | 69
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/request.go | 131
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/response.go | 235
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/route.go | 183
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go | 240
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/router.go | 18
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go | 23
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md | 43
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md | 76
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go | 64
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go | 38
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go | 436
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go | 86
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go | 66
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go | 87
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go | 36
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go | 184
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go | 21
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go | 440
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go | 268
-rw-r--r--  src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go | 39
-rw-r--r--  src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE | 50
-rw-r--r--  src/kube2msb/vendor/github.com/ghodss/yaml/README.md | 116
-rw-r--r--  src/kube2msb/vendor/github.com/ghodss/yaml/fields.go | 497
-rw-r--r--  src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go | 277
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE | 36
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile | 43
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go | 228
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go | 872
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go | 175
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go | 1335
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go | 354
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go | 266
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go | 519
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go | 221
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go | 883
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go | 40
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go | 280
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go | 479
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go | 266
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go | 108
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go | 915
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go | 64
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go | 117
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go | 793
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go | 55
-rw-r--r--  src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go | 841
-rw-r--r--  src/kube2msb/vendor/github.com/golang/glog/LICENSE | 191
-rw-r--r--  src/kube2msb/vendor/github.com/golang/glog/README | 44
-rw-r--r--  src/kube2msb/vendor/github.com/golang/glog/glog.go | 1177
-rw-r--r--  src/kube2msb/vendor/github.com/golang/glog/glog_file.go | 124
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/LICENSE | 31
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile | 43
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go | 223
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go | 867
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go | 1325
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go | 276
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go | 399
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go | 894
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go | 280
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 479
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 266
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go | 842
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go | 751
-rw-r--r--  src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go | 806
-rw-r--r--  src/kube2msb/vendor/github.com/google/cadvisor/LICENSE | 190
-rw-r--r--  src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go | 583
-rw-r--r--  src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go | 37
-rw-r--r--  src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go | 205
-rw-r--r--  src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go | 79
-rw-r--r--  src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md | 67
-rw-r--r--  src/kube2msb/vendor/github.com/google/gofuzz/LICENSE | 202
-rw-r--r--  src/kube2msb/vendor/github.com/google/gofuzz/README.md | 71
-rw-r--r--  src/kube2msb/vendor/github.com/google/gofuzz/doc.go | 18
-rw-r--r--  src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go | 446
-rw-r--r--  src/kube2msb/vendor/github.com/imdario/mergo/LICENSE | 28
-rw-r--r--  src/kube2msb/vendor/github.com/imdario/mergo/README.md | 68
-rw-r--r--  src/kube2msb/vendor/github.com/imdario/mergo/doc.go | 44
-rw-r--r--  src/kube2msb/vendor/github.com/imdario/mergo/map.go | 146
-rw-r--r--  src/kube2msb/vendor/github.com/imdario/mergo/merge.go | 99
-rw-r--r--  src/kube2msb/vendor/github.com/imdario/mergo/mergo.go | 90
-rw-r--r--  src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE | 201
-rw-r--r--  src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md | 61
-rw-r--r--  src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go | 164
-rw-r--r--  src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE | 191
-rw-r--r--  src/kube2msb/vendor/github.com/juju/ratelimit/README.md | 117
-rw-r--r--  src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go | 245
-rw-r--r--  src/kube2msb/vendor/github.com/juju/ratelimit/reader.go | 51
-rw-r--r--  src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE | 201
-rw-r--r--  src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go | 75
-rw-r--r--  src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go | 16
-rw-r--r--  src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go | 46
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE | 191
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE | 17
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go | 64
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go | 3
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go | 402
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go | 237
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go | 94
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go | 121
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go | 139
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go | 78
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go | 61
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go | 3
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go | 71
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go | 291
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go | 40
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go | 41
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go | 41
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go | 35
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go | 73
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go | 78
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go | 106
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go | 413
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go | 61
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go | 124
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go | 6
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go | 6
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go | 332
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go | 51
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go | 57
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go | 125
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go | 9
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go | 14
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go | 30
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go | 5
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go | 31
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go | 15
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go | 127
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go | 8
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go | 72
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go | 143
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go | 27
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go | 40
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go | 25
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go | 25
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go | 25
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go | 12
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go | 15
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go | 9
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go | 99
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go | 121
-rw-r--r--  src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go | 33
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS | 1
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/LICENSE | 27
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/dce.go | 84
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/doc.go | 8
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/hash.go | 53
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/json.go | 30
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/node.go | 101
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/time.go | 132
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/util.go | 43
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/uuid.go | 163
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/version1.go | 41
-rw-r--r--  src/kube2msb/vendor/github.com/pborman/uuid/version4.go | 25
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE | 201
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE | 28
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md | 53
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go | 75
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go | 175
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go | 201
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go | 109
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go | 119
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go | 147
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | 50
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go | 450
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go | 361
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go | 166
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go | 142
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go | 65
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go | 726
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go | 540
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go | 145
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go | 234
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go | 247
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE | 201
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE | 5
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go | 364
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/LICENSE | 201
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/NOTICE | 5
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go | 433
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go | 88
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go | 40
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go | 36
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go | 174
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go | 305
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go | 753
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt | 67
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go | 162
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/alert.go | 136
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go | 105
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go | 42
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/labels.go | 206
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go | 169
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/metric.go | 98
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/model.go | 16
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/signature.go | 144
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/silence.go | 106
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/time.go | 249
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/common/model/value.go | 403
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md | 11
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md | 18
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE | 201
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE | 7
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/README.md | 7
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/doc.go | 45
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/fs.go | 36
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/proc.go | 149
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go | 111
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go | 175
-rw-r--r--  src/kube2msb/vendor/github.com/prometheus/procfs/stat.go | 55
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/LICENSE | 28
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/README.md | 256
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/bool.go | 97
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/count.go | 97
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/duration.go | 86
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/flag.go | 836
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/float32.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/float64.go | 87
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go | 97
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/int.go | 87
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/int32.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/int64.go | 87
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/int8.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go | 128
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/ip.go | 96
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go | 122
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go | 100
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/string.go | 82
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go | 111
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/uint.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/uint16.go | 89
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/uint32.go | 89
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/uint64.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/spf13/pflag/uint8.go | 91
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/LICENSE | 22
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go | 199
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/README.md | 148
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go | 922
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go | 585
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go | 2019
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go | 1419
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go | 39365
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl | 540
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go | 32
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl | 104
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl | 58
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go | 233
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl | 364
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go | 175
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go | 1997
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go | 1271
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go | 242
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go | 20
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go | 45
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/json.go | 1213
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go | 845
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go | 213
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go | 3
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh | 199
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go | 180
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go | 519
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json | 639
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/test.py | 126
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh | 80
-rw-r--r--  src/kube2msb/vendor/github.com/ugorji/go/codec/time.go | 233
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/LICENSE | 27
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/PATENTS | 22
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/context/context.go | 447
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go | 19
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go | 23
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go | 140
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/Dockerfile | 51
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/Makefile | 3
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/README | 20
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/client_conn_pool.go | 225
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/configure_transport.go | 89
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/errors.go | 90
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/fixed_buffer.go | 60
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/flow.go | 50
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/frame.go | 1269
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/go15.go | 11
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/gotrack.go | 170
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/headermap.go | 78
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/hpack/encode.go | 251
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/hpack/hpack.go | 533
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/hpack/huffman.go | 190
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/hpack/tables.go | 352
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/http2.go | 429
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/not_go15.go | 11
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/not_go16.go | 13
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/pipe.go | 147
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/server.go | 2308
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/transport.go | 1750
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/write.go | 263
-rw-r--r--  src/kube2msb/vendor/golang.org/x/net/http2/writesched.go | 283
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/AUTHORS | 3
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTORS | 3
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/LICENSE | 27
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/README.md | 64
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/client_appengine.go | 25
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/google/appengine.go | 83
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/google/appengine_hook.go | 13
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/google/default.go | 154
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/google/google.go | 145
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/google/sdk.go | 168
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/internal/oauth2.go | 76
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/internal/token.go | 213
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/internal/transport.go | 67
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/jws/jws.go | 160
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/jwt/jwt.go | 147
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/oauth2.go | 325
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/token.go | 143
-rw-r--r--  src/kube2msb/vendor/golang.org/x/oauth2/transport.go | 132
-rw-r--r--  src/kube2msb/vendor/google.golang.org/cloud/LICENSE | 202
-rw-r--r--  src/kube2msb/vendor/google.golang.org/cloud/compute/metadata/metadata.go | 382
-rw-r--r--  src/kube2msb/vendor/google.golang.org/cloud/internal/cloud.go | 128
-rw-r--r--  src/kube2msb/vendor/gopkg.in/inf.v0/LICENSE | 28
-rw-r--r--  src/kube2msb/vendor/gopkg.in/inf.v0/dec.go | 615
-rw-r--r--  src/kube2msb/vendor/gopkg.in/inf.v0/rounder.go | 145
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE | 188
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/README.md | 131
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/apic.go | 742
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/decode.go | 683
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/emitterc.go | 1685
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/encode.go | 306
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/parserc.go | 1096
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/readerc.go | 394
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/resolve.go | 203
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/scannerc.go | 2710
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/sorter.go | 104
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/writerc.go | 89
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/yaml.go | 346
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/yamlh.go | 716
-rw-r--r--  src/kube2msb/vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/LICENSE | 202
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/OWNERS | 6
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/context.go | 121
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/conversion.go | 160
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go | 2950
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/doc.go | 24
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go | 238
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go | 18
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go | 456
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/field_constants.go | 38
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/generate.go | 64
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/helpers.go | 502
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/install/install.go | 251
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/mapper.go | 60
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta.go | 129
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go | 72
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/help.go | 134
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go | 180
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go | 567
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go | 30
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go | 200
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go | 173
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go | 520
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/node_example.json | 49
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod/util.go | 61
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod_example.json | 102
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/ref.go | 130
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/register.go | 115
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json | 82
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go | 115
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go | 298
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go | 46
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto | 93
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/math.go | 327
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go | 777
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go | 284
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go | 95
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go | 198
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go | 209
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go | 28
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/util.go | 68
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.generated.go | 59756
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.go | 2891
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go | 288
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go | 47
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go | 4212
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto | 377
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go | 287
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go | 154
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go | 62
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go | 25
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go | 160
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go | 85
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go | 460
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go | 208
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go | 74
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go | 30
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go | 48
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go | 579
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go | 6820
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go | 2924
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go | 301
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go | 21
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go | 34797
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto | 2935
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go | 85
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/register.go | 94
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go | 60001
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.go | 3329
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go | 1742
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/events.go | 45
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/name.go | 66
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go | 370
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go | 3193
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go | 346
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go | 52
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go | 111
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go | 125
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go | 54
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go | 1634
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go | 94
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go | 118
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go | 156
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go | 118
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go | 46
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go | 969
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto | 102
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go | 50
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go | 1664
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go | 94
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go | 71
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go | 86
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go | 123
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go | 50
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go | 1265
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go | 61
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go | 30
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go | 143
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go | 86
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go | 25
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go | 44
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go | 1321
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go | 63
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go | 149
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go | 128
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go | 50
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go | 2570
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go | 124
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go | 30
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go | 333
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go | 149
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go | 25
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go | 48
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go | 2710
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go | 123
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go | 118
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go | 149
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go | 129
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go | 54
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go | 2659
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go | 120
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go | 300
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go | 150
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go | 34
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go | 1612
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto | 131
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go | 47
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go | 2659
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go | 122
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go | 117
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go | 258
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go | 137
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go | 56
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go | 4671
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go | 244
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go | 106
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go | 330
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go | 197
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go | 42
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go | 1901
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto | 177
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go | 47
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go | 3184
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go | 186
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go | 114
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go | 116
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go | 573
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go | 298
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go | 49
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go | 3018
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto | 254
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go | 50
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go | 5310
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go | 283
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go | 178
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/deep_copy_generated.go | 120
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go | 131
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go | 57
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go | 1963
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go | 85
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go | 23
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion_generated.go | 237
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/deep_copy_generated.go | 121
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go | 1192
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto | 86
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go | 62
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go | 1963
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go | 85
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go | 70
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go | 299
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go | 97
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go | 129
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go | 50
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go | 9703
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go | 621
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go | 182
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go | 110
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go | 114
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go | 40
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go | 141
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go | 859
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go | 132
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go | 79
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go | 17991
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go | 901
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go | 404
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go | 2539
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go | 1170
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go | 167
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go | 22
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go | 13005
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto | 1010
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go | 69
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go | 23939
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go | 1198
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go | 740
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go | 90
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go | 129
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go | 52
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go | 1440
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go | 71
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go | 183
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go | 91
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go | 23
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go | 903
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto | 77
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go | 50
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go | 1440
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go | 72
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go | 70
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go | 241
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go | 20
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go | 130
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go | 64
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go | 178
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go | 536
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go | 238
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go | 21
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go | 2209
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto | 159
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go | 52
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go | 4327
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go | 165
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go | 138
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go | 19
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/user.go | 65
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go | 94
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go | 18
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go | 613
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go | 24
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go | 208
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go | 54
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go | 102
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go | 321
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/index.go | 82
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go | 672
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go | 86
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go | 423
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/store.go | 240
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go | 288
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go | 83
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go | 67
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go | 224
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go | 328
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go | 73
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go | 1086
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go | 94
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go | 93
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go | 107
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go | 88
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go | 88
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/config.go | 84
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go | 337
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go | 140
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go | 317
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go | 76
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go | 125
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go | 77
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go | 107
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificates.go | 86
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificatesigningrequests.go | 104
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go | 179
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go | 183
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go | 54
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go | 43
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go | 152
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go | 231
-rw-r--r--  src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go | 40
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go145
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go91
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go411
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go472
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go37
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go585
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go122
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go198
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go270
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go92
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go92
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go60
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go240
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go122
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go123
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go100
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go111
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go57
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go101
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go219
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go131
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go31
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go273
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go103
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go41
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go100
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go167
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go94
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go116
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go92
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go111
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go99
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go93
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go100
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go100
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go94
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go115
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go111
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go76
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go96
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go100
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go99
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go102
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go95
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go95
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go77
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go103
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go120
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go120
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go121
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go98
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go79
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go326
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go18
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go262
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go383
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS5
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go237
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/converter.go951
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go36
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/doc.go24
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/helper.go39
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go188
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/fields.go62
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/selector.go247
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go25
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go66
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go140
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go29
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/labels.go71
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/selector.go810
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go40
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS5
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec.go198
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go50
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go98
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go63
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/doc.go45
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go136
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/error.go102
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/extension.go48
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go689
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto124
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/helper.go212
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go217
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/register.go66
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go623
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go364
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go243
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go61
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go57
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go18
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go433
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go52
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go127
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go137
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go275
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go262
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types.go514
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go69
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go199
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/doc.go18
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go35
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/uid.go22
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go23
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/clock.go218
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go190
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/doc.go20
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go18
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go156
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go149
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go116
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go167
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go37
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go40
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go67
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go347
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto42
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go147
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/json/json.go107
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go63
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/logs.go61
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/http.go235
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/interface.go278
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go108
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go77
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md17
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go119
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/util.go36
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go54
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go83
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go49
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go31
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runner.go58
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go94
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go194
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go20
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go23
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int.go194
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go194
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/string.go194
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/string_flag.go56
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/template.go48
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/trace.go72
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask.go27
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go27
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/util.go147
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/uuid.go42
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go228
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go91
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go306
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go268
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go247
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/base.go59
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/semver.go50
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/version.go76
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/doc.go19
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/filter.go109
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/mux.go257
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go119
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/until.go82
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go71
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go51
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go342
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto43
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go84
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go37
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/watch.go137
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go106
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS2
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go270
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go23
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE27
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS22
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/deep_equal.go388
-rw-r--r--src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/type.go91
-rw-r--r--src/kube2msb/vendor/vendor.json1018
939 files changed, 509094 insertions, 0 deletions
diff --git a/src/kube2msb/Makefile b/src/kube2msb/Makefile
new file mode 100644
index 0000000..d598b97
--- /dev/null
+++ b/src/kube2msb/Makefile
@@ -0,0 +1,11 @@
+.PHONY: kube2msb clean test
+
+kube2msb: kube2msb.go
+ CGO_ENABLED=0 go build --ldflags '-extldflags "-static"'
+ strip kube2msb
+
+clean:
+ rm -fr kube2msb
+
+test: clean
+ go test -v --vmodule=*=4
diff --git a/src/kube2msb/kube2msb.go b/src/kube2msb/kube2msb.go
new file mode 100644
index 0000000..627405e
--- /dev/null
+++ b/src/kube2msb/kube2msb.go
@@ -0,0 +1,288 @@
+/*
+Copyright 2017 ZTE, Inc. and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/url"
+ "os"
+ "reflect"
+ "time"
+
+ kapi "k8s.io/kubernetes/pkg/api"
+ kcache "k8s.io/kubernetes/pkg/client/cache"
+ kclient "k8s.io/kubernetes/pkg/client/unversioned"
+ kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+ kframework "k8s.io/kubernetes/pkg/controller/framework"
+ kselector "k8s.io/kubernetes/pkg/fields"
+ klabels "k8s.io/kubernetes/pkg/labels"
+)
+
+var (
+ argMSBUrl = flag.String("msb_url", "", "URL to MSB backend")
+ argKubeMasterUrl = flag.String("kube_master_url", "", "URL to reach kubernetes master. Env variables in this flag will be expanded.")
+ addMap = make(map[string]*kapi.Pod)
+ deleteMap = make(map[string]*kapi.Pod)
+ nodeSelector = klabels.Everything()
+)
+
+const resyncPeriod = 5 * time.Second
+
+func getMSBUrl() (string, error) {
+ if *argMSBUrl == "" {
+ return "", fmt.Errorf("no --msb_url specified")
+ }
+ parsedUrl, err := url.Parse(os.ExpandEnv(*argMSBUrl))
+ if err != nil {
+ return "", fmt.Errorf("failed to parse --msb_url %s - %v", *argMSBUrl, err)
+ }
+ if parsedUrl.Scheme == "" || parsedUrl.Host == "" || parsedUrl.Host == ":" {
+ return "", fmt.Errorf("invalid --msb_url specified %s", *argMSBUrl)
+ }
+ return parsedUrl.String(), nil
+}
+
+func newMSBClient() (Client, error) {
+ msbUrl, err := getMSBUrl()
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := newMSBAgent(msbUrl)
+ if err != nil {
+ return nil, err
+ }
+ return client, nil
+}
+
+func getKubeMasterUrl() (string, error) {
+ if *argKubeMasterUrl == "" {
+ return "", fmt.Errorf("no --kube_master_url specified")
+ }
+ parsedUrl, err := url.Parse(os.ExpandEnv(*argKubeMasterUrl))
+ if err != nil {
+ return "", fmt.Errorf("failed to parse --kube_master_url %s - %v", *argKubeMasterUrl, err)
+ }
+ if parsedUrl.Scheme == "" || parsedUrl.Host == "" || parsedUrl.Host == ":" {
+ return "", fmt.Errorf("invalid --kube_master_url specified %s", *argKubeMasterUrl)
+ }
+ return parsedUrl.String(), nil
+}
+
+func newKubeClient() (*kclient.Client, error) {
+ masterUrl, err := getKubeMasterUrl()
+ if err != nil {
+ return nil, err
+ }
+ overrides := &kclientcmd.ConfigOverrides{}
+ overrides.ClusterInfo.Server = masterUrl
+
+ rules := kclientcmd.NewDefaultClientConfigLoadingRules()
+ kubeConfig, err := kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
+
+ if err != nil {
+ log.Println("Error creating Kube Config", err)
+ return nil, err
+ }
+ kubeConfig.Host = masterUrl
+
+ log.Printf("Using %s for kubernetes master", kubeConfig.Host)
+ return kclient.New(kubeConfig)
+}
+
+// Returns a cache.ListWatch that gets all changes to services.
+func createServiceLW(kubeClient *kclient.Client) *kcache.ListWatch {
+ return kcache.NewListWatchFromClient(kubeClient, "services", kapi.NamespaceAll, kselector.Everything())
+}
+
+func sendServiceWork(action KubeWorkAction, queue chan<- KubeWork, serviceObj interface{}) {
+ if service, ok := serviceObj.(*kapi.Service); ok {
+ log.Println("Service Action: ", action, " for service ", service.Name)
+ queue <- KubeWork{
+ Action: action,
+ Service: service,
+ }
+ }
+}
+
+func watchForServices(kubeClient *kclient.Client, queue chan<- KubeWork) {
+ _, svcController := kframework.NewInformer(
+ createServiceLW(kubeClient),
+ &kapi.Service{},
+ resyncPeriod,
+ kframework.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ sendServiceWork(KubeWorkAddService, queue, obj)
+ },
+ DeleteFunc: func(obj interface{}) {
+ sendServiceWork(KubeWorkRemoveService, queue, obj)
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ if !reflect.DeepEqual(newObj, oldObj) {
+ sendServiceWork(KubeWorkUpdateService, queue, newObj)
+ }
+ },
+ },
+ )
+ stop := make(chan struct{})
+ go svcController.Run(stop)
+}
+
+// Returns a cache.ListWatch that gets all changes to Pods.
+func createPodLW(kubeClient *kclient.Client) *kcache.ListWatch {
+ return kcache.NewListWatchFromClient(kubeClient, "pods", kapi.NamespaceAll, kselector.Everything())
+}
+
+// Dispatch the notifications for Pods by type to the worker
+func sendPodWork(action KubeWorkAction, queue chan<- KubeWork, podObj interface{}) {
+ if pod, ok := podObj.(*kapi.Pod); ok {
+ log.Println("Pod Action: ", action, " for Pod:", pod.Name)
+ queue <- KubeWork{
+ Action: action,
+ Pod: pod,
+ }
+ }
+}
+
+// Launch the go routine to watch notifications for Pods.
+func watchForPods(kubeClient *kclient.Client, queue chan<- KubeWork) {
+ var podController *kframework.Controller
+ _, podController = kframework.NewInformer(
+ createPodLW(kubeClient),
+ &kapi.Pod{},
+ resyncPeriod,
+ kframework.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ sendPodWork(KubeWorkAddPod, queue, obj)
+ },
+ DeleteFunc: func(obj interface{}) {
+ if o, ok := obj.(*kapi.Pod); ok {
+ if _, ok := deleteMap[o.Name]; ok {
+ delete(deleteMap, o.Name)
+ }
+ }
+ sendPodWork(KubeWorkRemovePod, queue, obj)
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
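+ // addMap holds Pods that were announced before they had a Pod IP; they are
+ // re-queued only once they report Ready. deleteMap marks Pods whose
+ // DeletionTimestamp has been seen, so later updates for a terminating Pod
+ // are ignored until the final delete arrives.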
+ o, n := oldObj.(*kapi.Pod), newObj.(*kapi.Pod)
+ if !reflect.DeepEqual(oldObj, newObj) {
+ //Adding Pod
+ if _, ok := addMap[n.Name]; ok {
+ if kapi.IsPodReady(n) {
+ delete(addMap, n.Name)
+ sendPodWork(KubeWorkUpdatePod, queue, newObj)
+ }
+ return
+ }
+ //Deleting Pod
+ if _, ok := deleteMap[n.Name]; ok {
+ return
+ } else {
+ if o.ObjectMeta.DeletionTimestamp == nil &&
+ n.ObjectMeta.DeletionTimestamp != nil {
+ deleteMap[n.Name] = n
+ return
+ }
+ //Updating Pod
+ sendPodWork(KubeWorkUpdatePod, queue, newObj)
+ }
+ }
+ },
+ },
+ )
+ stop := make(chan struct{})
+ go podController.Run(stop)
+}
+
+func runBookKeeper(workQue <-chan KubeWork, msbQueue chan<- MSBWork) {
+
+ client := newClientBookKeeper()
+ client.msbQueue = msbQueue
+
+ for work := range workQue {
+ switch work.Action {
+ case KubeWorkAddService:
+ client.AddService(work.Service)
+ case KubeWorkRemoveService:
+ client.RemoveService(work.Service)
+ case KubeWorkUpdateService:
+ client.UpdateService(work.Service)
+ case KubeWorkAddPod:
+ client.AddPod(work.Pod)
+ case KubeWorkRemovePod:
+ client.RemovePod(work.Pod)
+ case KubeWorkUpdatePod:
+ client.UpdatePod(work.Pod)
+ default:
+ log.Println("Unsupported work action: ", work.Action)
+ }
+ }
+ log.Println("Completed all work")
+}
+
+func runMSBWorker(queue <-chan MSBWork, client Client) {
+ worker := newMSBAgentWorker(client)
+
+ for work := range queue {
+ log.Println("MSB Work Action: ", work.Action, " ServiceInfo:", work.ServiceInfo)
+
+ switch work.Action {
+ case MSBWorkAddService:
+ worker.AddService(work.IPAddress, work.ServiceInfo)
+ case MSBWorkRemoveService:
+ worker.RemoveService(work.IPAddress, work.ServiceInfo)
+ case MSBWorkAddPod:
+ worker.AddPod(work.IPAddress, work.ServiceInfo)
+ case MSBWorkRemovePod:
+ worker.RemovePod(work.IPAddress, work.ServiceInfo)
+ default:
+ log.Println("Unsupported Action of: ", work.Action)
+ }
+ }
+}
+
+func main() {
+ flag.Parse()
+ var err error
+
+ msbClient, err := newMSBClient()
+ if err != nil {
+ log.Fatalf("Failed to create MSB client - %v", err)
+ }
+
+ kubeClient, err := newKubeClient()
+ if err != nil {
+ log.Fatalf("Failed to create a kubernetes client: %v", err)
+ }
+
+ if _, err := kubeClient.ServerVersion(); err != nil {
+ log.Fatal("Could not connect to Kube Master", err)
+ } else {
+ log.Println("Connected to K8S API Server")
+ }
+
+ kubeWorkQueue := make(chan KubeWork)
+ msbWorkQueue := make(chan MSBWork)
+ go runBookKeeper(kubeWorkQueue, msbWorkQueue)
+ watchForServices(kubeClient, kubeWorkQueue)
+ watchForPods(kubeClient, kubeWorkQueue)
+ go runMSBWorker(msbWorkQueue, msbClient)
+
+ // Prevent exit
+ select {}
+}
diff --git a/src/kube2msb/kube_work.go b/src/kube2msb/kube_work.go
new file mode 100644
index 0000000..4e99cbd
--- /dev/null
+++ b/src/kube2msb/kube_work.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2017 ZTE, Inc. and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "log"
+ "sync"
+
+ kapi "k8s.io/kubernetes/pkg/api"
+)
+
+type KubeBookKeeper interface {
+ AddService(*kapi.Service)
+ RemoveService(*kapi.Service)
+ UpdateService(*kapi.Service)
+ AddPod(*kapi.Pod)
+ RemovePod(*kapi.Pod)
+ UpdatePod(*kapi.Pod)
+}
+
+type ClientBookKeeper struct {
+ sync.Mutex
+ KubeBookKeeper
+ services map[string]*kapi.Service
+ pods map[string]*kapi.Pod
+ msbQueue chan<- MSBWork
+}
+
+func newClientBookKeeper() *ClientBookKeeper {
+ return &ClientBookKeeper{
+ services: make(map[string]*kapi.Service),
+ pods: make(map[string]*kapi.Pod),
+ }
+}
+
+func (client *ClientBookKeeper) AddService(svc *kapi.Service) {
+ client.Lock()
+ defer client.Unlock()
+ if _, ok := svc.ObjectMeta.Annotations[serviceKey]; !ok {
+ log.Println("Not the target, skip this ADD notification for service:", svc.Name)
+ return
+ }
+
+ if _, ok := client.services[svc.Name]; ok {
+ log.Printf("service:%s already exist. skip this ADD notification.", svc.Name)
+ return
+ }
+
+ if kapi.IsServiceIPSet(svc) {
+ if svc.Spec.Type == kapi.ServiceTypeClusterIP || svc.Spec.Type == kapi.ServiceTypeNodePort {
+ log.Printf("Adding %s service:%s", svc.Spec.Type, svc.Name)
+ client.msbQueue <- MSBWork{
+ Action: MSBWorkAddService,
+ ServiceInfo: svc.ObjectMeta.Annotations[serviceKey],
+ IPAddress: svc.Spec.ClusterIP,
+ }
+ } else if svc.Spec.Type == kapi.ServiceTypeLoadBalancer {
+ log.Println("Adding LoadBalancerIP service:", svc.Name)
+ client.msbQueue <- MSBWork{
+ Action: MSBWorkAddService,
+ ServiceInfo: svc.ObjectMeta.Annotations[serviceKey],
+ IPAddress: svc.Spec.LoadBalancerIP,
+ }
+ } else {
+ log.Printf("Service Type:%s for Service:%s is not supported", svc.Spec.Type, svc.Name)
+ return
+ }
+ client.services[svc.Name] = svc
+ log.Println("Queued Service to be added: ", svc.Name)
+ } else {
+ // if the ClusterIP is not set, there is no address to register with MSB
+ log.Printf("Skipping registration for headless service: %s\n", svc.Name)
+ }
+}
+
+func (client *ClientBookKeeper) RemoveService(svc *kapi.Service) {
+ client.Lock()
+ defer client.Unlock()
+ if _, ok := svc.ObjectMeta.Annotations[serviceKey]; !ok {
+ log.Println("Not the target, skip this Remove notification for service:", svc.Name)
+ return
+ }
+
+ if _, ok := client.services[svc.Name]; !ok {
+ log.Printf("Service:%s not exist. skip this REMOVE notification.", svc.Name)
+ return
+ }
+
+ if svc.Spec.Type == kapi.ServiceTypeClusterIP || svc.Spec.Type == kapi.ServiceTypeNodePort {
+ log.Printf("Removing %s service:%s", svc.Spec.Type, svc.Name)
+ //Queue the MSB deregistration
+ client.msbQueue <- MSBWork{
+ Action: MSBWorkRemoveService,
+ ServiceInfo: svc.ObjectMeta.Annotations[serviceKey],
+ IPAddress: svc.Spec.ClusterIP,
+ }
+ } else if svc.Spec.Type == kapi.ServiceTypeLoadBalancer {
+ log.Println("Removing LoadBalancerIP service:", svc.Name)
+ client.msbQueue <- MSBWork{
+ Action: MSBWorkRemoveService,
+ ServiceInfo: svc.ObjectMeta.Annotations[serviceKey],
+ IPAddress: svc.Spec.LoadBalancerIP,
+ }
+ } else {
+ log.Printf("Service Type:%s for Service:%s is not supported", svc.Spec.Type, svc.Name)
+ return
+ }
+ delete(client.services, svc.Name)
+ log.Println("Queued Service to be removed: ", svc.Name)
+}
+
+func (client *ClientBookKeeper) UpdateService(svc *kapi.Service) {
+ if _, ok := svc.ObjectMeta.Annotations[serviceKey]; !ok {
+ log.Println("Not the target, skip this Update notification for service:", svc.Name)
+ return
+ }
+
+ client.RemoveService(svc)
+ client.AddService(svc)
+}
+
+func (client *ClientBookKeeper) AddPod(pod *kapi.Pod) {
+ client.Lock()
+ defer client.Unlock()
+ if _, ok := pod.Annotations[serviceKey]; !ok {
+ log.Println("Not the target, skip this ADD notification for pod:", pod.Name)
+ return
+ }
+
+ if _, ok := client.pods[pod.Name]; ok {
+ log.Printf("Pod:%s already exist. skip this ADD notification.", pod.Name)
+ return
+ }
+
+ //newly added Pod that is not yet routable; stash it and register once it is Ready
+ if pod.Name == "" || pod.Status.PodIP == "" {
+ log.Printf("Pod:%s has no name or no pod IP yet. deferring this ADD notification.", pod.Name)
+ addMap[pod.Name] = pod
+ return
+ }
+
+ //Queue the MSB registration
+ client.msbQueue <- MSBWork{
+ Action: MSBWorkAddPod,
+ ServiceInfo: pod.Annotations[serviceKey],
+ IPAddress: pod.Status.PodIP,
+ }
+ client.pods[pod.Name] = pod
+ log.Println("Queued Pod to be added: ", pod.Name)
+}
+
+func (client *ClientBookKeeper) RemovePod(pod *kapi.Pod) {
+ client.Lock()
+ defer client.Unlock()
+ if _, ok := pod.Annotations[serviceKey]; !ok {
+ log.Println("Not the target, skip this Remove notification for pod:", pod.Name)
+ return
+ }
+
+ if _, ok := client.pods[pod.Name]; !ok {
+ log.Printf("Pod:%s not exist. skip this REMOVE notification.", pod.Name)
+ return
+ }
+ //Queue the MSB deregistration
+ client.msbQueue <- MSBWork{
+ Action: MSBWorkRemovePod,
+ ServiceInfo: pod.Annotations[serviceKey],
+ IPAddress: pod.Status.PodIP,
+ }
+ delete(client.pods, pod.Name)
+ log.Println("Queued Pod to be removed: ", pod.Name)
+}
+
+func (client *ClientBookKeeper) UpdatePod(pod *kapi.Pod) {
+ if _, ok := pod.Annotations[serviceKey]; !ok {
+ log.Println("Not the target, skip this Update notification for pod:", pod.Name)
+ return
+ }
+
+ client.RemovePod(pod)
+ client.AddPod(pod)
+}
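The bookkeeper above only reacts to objects carrying the `msb.onap.org/service-info` annotation and turns them into `MSBWork` items. A minimal sketch of that path for a ClusterIP service, assuming it is compiled into the same `main` package as the files in this change; the function name, annotation value and addresses are made up for illustration:

```go
package main

import (
	"fmt"

	kapi "k8s.io/kubernetes/pkg/api"
)

func exampleAddClusterIPService() {
	queue := make(chan MSBWork, 1)
	keeper := newClientBookKeeper()
	keeper.msbQueue = queue

	svc := &kapi.Service{
		ObjectMeta: kapi.ObjectMeta{
			Name: "example",
			Annotations: map[string]string{
				// serviceKey is "msb.onap.org/service-info"
				serviceKey: `[{"serviceName":"example","port":"8080","protocol":"REST"}]`,
			},
		},
		Spec: kapi.ServiceSpec{
			Type:      kapi.ServiceTypeClusterIP,
			ClusterIP: "10.0.0.42",
		},
	}

	keeper.AddService(svc)

	work := <-queue
	// For ClusterIP (and NodePort) services the cluster IP is what gets queued.
	fmt.Println(work.Action, work.IPAddress) // AddService 10.0.0.42
}
```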
diff --git a/src/kube2msb/msb_client.go b/src/kube2msb/msb_client.go
new file mode 100644
index 0000000..2c1b402
--- /dev/null
+++ b/src/kube2msb/msb_client.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2017 ZTE, Inc. and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+)
+
+const (
+ urlPrefix = "/api/microservices/v1/services"
+)
+
+type Client interface {
+ Register(string)
+ DeRegister(string)
+}
+
+type MSBAgent struct {
+ Client
+ url string
+}
+
+func newMSBAgent(s string) (*MSBAgent, error) {
+ healthCheckURL := s + urlPrefix + "/health"
+ resp, err := http.Get(healthCheckURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("MSB agent:%s is not available", s)
+ }
+
+ return &MSBAgent{url: s}, nil
+}
+
+func (client *MSBAgent) Register(serviceInfo string) {
+ var (
+ sas = []*ServiceAnnotation{}
+ )
+ err := json.Unmarshal([]byte(serviceInfo), &sas)
+ if err != nil {
+ log.Printf("Failed to Unmarshal serviceInfo to ServiceAnnotation:%v", err)
+ return
+ }
+
+ for _, sa := range sas {
+ su := ServiceAnnotation2ServiceUnit(sa)
+ body, _ := json.Marshal(su)
+ postURL := client.url + urlPrefix
+
+ resp, err := http.Post(postURL, "application/json", bytes.NewReader(body))
+ if err != nil {
+ log.Printf("Failed to do a request:%v", err)
+ return
+ }
+
+ log.Printf("Http request to register service:%s returned code:%d", su.Name, resp.StatusCode)
+ }
+}
+
+func (client *MSBAgent) DeRegister(serviceInfo string) {
+ var (
+ sas = []*ServiceAnnotation{}
+ )
+
+ err := json.Unmarshal([]byte(serviceInfo), &sas)
+ if err != nil {
+ log.Printf("Failed to Unmarshal serviceInfo to ServiceAnnotation:%v", err)
+ return
+ }
+
+ for _, sa := range sas {
+ var deleteURL string
+ if sa.Version == "" {
+ deleteURL = client.url + urlPrefix + "/" + sa.ServiceName + "/version/" + "null" + "/nodes/" + sa.IP + "/" + sa.Port
+ } else {
+ deleteURL = client.url + urlPrefix + "/" + sa.ServiceName + "/version/" + sa.Version + "/nodes/" + sa.IP + "/" + sa.Port
+ }
+ log.Printf("deleteURL:%s", deleteURL)
+ req, err := http.NewRequest("DELETE", deleteURL, nil)
+ if err != nil {
+ log.Printf("(deleteURL:%s) failed to NewRequest:%v", deleteURL, err)
+ return
+ }
+
+ c := &http.Client{}
+ resp, err := c.Do(req)
+ if err != nil {
+ log.Printf("(deleteURL:%s) failed to do a request:%v", deleteURL, err)
+ return
+ }
+
+ log.Printf("Http request to deregister service:%s returned code:%d", sa.ServiceName, resp.StatusCode)
+ }
+}
+
+func ServiceAnnotation2ServiceUnit(sa *ServiceAnnotation) *ServiceUnit {
+ if sa == nil {
+ return nil
+ }
+
+ return &ServiceUnit{
+ Name: sa.ServiceName,
+ Version: sa.Version,
+ URL: sa.URL,
+ Protocol: sa.Protocol,
+ LBPolicy: sa.LBPolicy,
+ VisualRange: sa.VisualRange,
+ Instances: []InstanceUnit{{ServiceIP: sa.IP, ServicePort: sa.Port}},
+ }
+}
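For reference, a small sketch of the payload shape `Register` posts to the MSB `/api/microservices/v1/services` endpoint for a single annotation entry, assuming it is built alongside the types in this change; all field values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func exampleRegistrationPayload() {
	// Hypothetical annotation entry.
	sa := &ServiceAnnotation{
		IP:          "10.0.0.42",
		Port:        "8080",
		ServiceName: "example",
		Version:     "v1",
		URL:         "/api/example/v1",
		Protocol:    "REST",
	}

	su := ServiceAnnotation2ServiceUnit(sa)
	body, _ := json.MarshalIndent(su, "", "  ")
	fmt.Println(string(body))

	// DeRegister for the same entry would issue:
	// DELETE <msb_url>/api/microservices/v1/services/example/version/v1/nodes/10.0.0.42/8080
}
```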
diff --git a/src/kube2msb/msb_work.go b/src/kube2msb/msb_work.go
new file mode 100644
index 0000000..5c40bae
--- /dev/null
+++ b/src/kube2msb/msb_work.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2017 ZTE, Inc. and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "log"
+ "strings"
+ "sync"
+)
+
+type MSBWorker interface {
+ AddService(string, string)
+ RemoveService(string, string)
+ AddPod(string, string)
+ RemovePod(string, string)
+}
+
+type MSBAgentWorker struct {
+ sync.Mutex
+ MSBWorker
+ agent Client
+}
+
+func newMSBAgentWorker(client Client) *MSBAgentWorker {
+ return &MSBAgentWorker{
+ agent: client,
+ }
+}
+
+func (client *MSBAgentWorker) AddService(ip, sInfo string) {
+ client.Lock()
+ defer client.Unlock()
+
+ if ip == "" || sInfo == "" {
+ log.Println("Service Info is not valid for AddService")
+ return
+ }
+
+ client.agent.Register(mergeIP(ip, sInfo))
+}
+
+func (client *MSBAgentWorker) RemoveService(ip, sInfo string) {
+ client.Lock()
+ defer client.Unlock()
+
+ if sInfo == "" {
+ log.Println("Service Info is not valid for RemoveService")
+ return
+ }
+
+ client.agent.DeRegister(mergeIP(ip, sInfo))
+}
+
+func (client *MSBAgentWorker) AddPod(ip, sInfo string) {
+ client.Lock()
+ defer client.Unlock()
+ if ip == "" || sInfo == "" {
+ log.Println("Service Info is not valid for AddPod")
+ return
+ }
+
+ client.agent.Register(mergeIP(ip, sInfo))
+}
+
+func (client *MSBAgentWorker) RemovePod(ip, sInfo string) {
+ client.Lock()
+ defer client.Unlock()
+ if sInfo == "" {
+ log.Println("Service Info is not valid for RemovePod")
+ return
+ }
+
+ client.agent.DeRegister(mergeIP(ip, sInfo))
+}
+
+func mergeIP(ip, sInfo string) string {
+ insert := "{\"ip\":\"" + ip + "\","
+ parts := strings.Split(sInfo, "{")
+ out := parts[0]
+ for i := 1; i < len(parts); i++ {
+ out += insert + parts[i]
+ }
+ return out
+}
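`mergeIP` works purely on the annotation string: it splits on `{` and prepends an `ip` field to every JSON object before the string reaches the MSB client. A minimal sketch, assuming it runs in the same package and using a made-up annotation value:

```go
package main

import "fmt"

func exampleMergeIP() {
	sInfo := `[{"serviceName":"example","port":"8080"}]`
	fmt.Println(mergeIP("10.0.0.42", sInfo))
	// Output: [{"ip":"10.0.0.42","serviceName":"example","port":"8080"}]
}
```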
diff --git a/src/kube2msb/types.go b/src/kube2msb/types.go
new file mode 100644
index 0000000..095400b
--- /dev/null
+++ b/src/kube2msb/types.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2017 ZTE, Inc. and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// types.go
+package main
+
+import (
+ kapi "k8s.io/kubernetes/pkg/api"
+)
+
+type KubeWorkAction string
+
+const (
+ KubeWorkAddService KubeWorkAction = "AddService"
+ KubeWorkRemoveService KubeWorkAction = "RemoveService"
+ KubeWorkUpdateService KubeWorkAction = "UpdateService"
+ KubeWorkAddPod KubeWorkAction = "AddPod"
+ KubeWorkRemovePod KubeWorkAction = "RemovePod"
+ KubeWorkUpdatePod KubeWorkAction = "UpdatePod"
+)
+
+type KubeWork struct {
+ Action KubeWorkAction
+ Service *kapi.Service
+ Pod *kapi.Pod
+}
+
+type MSBWorkAction string
+
+const (
+ MSBWorkAddService MSBWorkAction = "AddService"
+ MSBWorkRemoveService MSBWorkAction = "RemoveService"
+ MSBWorkAddPod MSBWorkAction = "AddPod"
+ MSBWorkRemovePod MSBWorkAction = "RemovePod"
+)
+
+type MSBWork struct {
+ Action MSBWorkAction
+ ServiceInfo string
+ IPAddress string
+}
+
+const serviceKey = "msb.onap.org/service-info"
+
+type ServiceUnit struct {
+ Name string `json:"serviceName,omitempty"`
+ Version string `json:"version"`
+ URL string `json:"url"`
+ Protocol string `json:"protocol"`
+ VisualRange string `json:"visualRange"`
+ LBPolicy string `json:"lb_policy"`
+ PublishPort string `json:"publish_port"`
+ Namespace string `json:"namespace"`
+ NWPlaneType string `json:"network_plane_type"`
+ Host string `json:"host"`
+ SubDomain string `json:"subdomain,omitempty"`
+ Path string `json:"path"`
+ Instances []InstanceUnit `json:"nodes"`
+ Metadata []MetaUnit `json:"metadata"`
+ Labels []string `json:"labels"`
+ SwaggerURL string `json:"swagger_url,omitempty"`
+ IsManual bool `json:"is_manual"`
+ EnableSSL bool `json:"enable_ssl"`
+ EnableTLS bool `json:"enable_tls"`
+ EnableReferMatch string `json:"enable_refer_match"`
+ ProxyRule Rules `json:"proxy_rule,omitempty"`
+ RateLimiting RateLimit `json:"rate_limiting,omitempty"`
+}
+
+type InstanceUnit struct {
+ ServiceIP string `json:"ip,omitempty"`
+ ServicePort string `json:"port,omitempty"`
+ LBServerParams string `json:"lb_server_params,omitempty"`
+ CheckType string `json:"checkType,omitempty"`
+ CheckURL string `json:"checkUrl,omitempty"`
+ CheckInterval string `json:"checkInterval,omitempty"`
+ CheckTTL string `json:"ttl,omitempty"`
+ CheckTimeOut string `json:"checkTimeOut,omitempty"`
+ HaRole string `json:"ha_role,omitempty"`
+ ServiceID string `json:"nodeId,omitempty"`
+ ServiceStatus string `json:"status,omitempty"`
+ APPVersion string `json:"appversion,omitempty"`
+}
+
+type MetaUnit struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+}
+
+type Rules struct {
+ HTTPProxy HTTPProxyRule `json:"http_proxy,omitempty"`
+ StreamProxy StreamProxyRule `json:"stream_proxy,omitempty"`
+}
+
+type HTTPProxyRule struct {
+ SendTimeout string `json:"send_timeout,omitempty"`
+ ReadTimeout string `json:"read_timeout,omitempty"`
+}
+
+type StreamProxyRule struct {
+ ProxyTimeout string `json:"proxy_timeout,omitempty"`
+ ProxyResponse string `json:"proxy_responses,omitempty"`
+}
+
+type RateLimit struct {
+ LimitReq LimitRequest `json:"limit_req,omitempty"`
+}
+
+type LimitRequest struct {
+ Rate string `json:"rate,omitempty"`
+ Burst string `json:"burst,omitempty"`
+}
+
+type ServiceAnnotation struct {
+ IP string `json:"ip,omitempty"`
+ Port string `json:"port,omitempty"`
+ ServiceName string `json:"serviceName,omitempty"`
+ Version string `json:"version,omitempty"`
+ URL string `json:"url,omitempty"`
+ Protocol string `json:"protocol,omitempty"`
+ LBPolicy string `json:"lb_policy,omitempty"`
+ VisualRange string `json:"visualRange,omitempty"`
+}
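The `serviceKey` annotation (`msb.onap.org/service-info`) is expected to hold a JSON array of `ServiceAnnotation` entries. A minimal sketch of how such a value could be produced, assuming it is compiled with the types above; every field value here is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func exampleAnnotationValue() {
	value, _ := json.Marshal([]ServiceAnnotation{{
		ServiceName: "example",
		Version:     "v1",
		URL:         "/api/example/v1",
		Protocol:    "REST",
		VisualRange: "1",
	}})
	fmt.Println(string(value))
	// [{"serviceName":"example","version":"v1","url":"/api/example/v1","protocol":"REST","visualRange":"1"}]
}
```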
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE b/src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md b/src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..b6aa84c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,352 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0), the core API is unlikely to change much, but please version
+control your Logrus to make sure you aren't fetching latest `master` on every
+build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[l2met](http://r.32k.io/l2met-introduction) format:
+
+```text
+time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
+time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
+time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
+time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
+time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(&logrus_airbrake.AirbrakeHook{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+```go
+// Not the real implementation of the Airbrake hook. Just a simple sample.
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ log.AddHook(new(AirbrakeHook))
+}
+
+type AirbrakeHook struct{}
+
+// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
+// the fields for the entry. See the Fields section of the README.
+func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
+ err := airbrake.Notify(entry.Data["error"].(error))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Info("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+// `Levels()` returns a slice of `Levels` the hook is fired for.
+func (hook *AirbrakeHook) Levels() []log.Level {
+ return []log.Level{
+ log.ErrorLevel,
+ log.FatalLevel,
+ log.PanicLevel,
+ }
+}
+```
+
+Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
+ Send errors to an exception tracking service compatible with the Airbrake API.
+ Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
+
+* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
+ Send errors to the Papertrail hosted logging service via UDP.
+
+* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
+ Send errors to remote syslog server.
+ Uses standard library `log/syslog` behind the scenes.
+
+* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
+ Send errors to a channel in hipchat.
+
+* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
+ Send logs to Loggly (https://www.loggly.com/)
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `AddFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(logrus.JSONFormatter)
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(logrus.TextFormatter)
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotated(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..e164eec
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,248 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
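+//
+// For example (an illustrative sketch; the variable names are hypothetical):
+//
+//  requestLogger := logger.WithFields(Fields{"request_id": "abc123"})
+//  requestLogger.Info("handling request")
+//  requestLogger.Warn("request is slow")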
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..d087124
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,182 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
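+//
+// For example (a minimal sketch):
+//
+//  WithFields(Fields{"animal": "walrus", "size": 10}).Info("A group of walrus emerges")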
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..038ce9f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,44 @@
+package logrus
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ...
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg` and `level` fields
+// when dumping an entry. If this code wasn't there, then
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user provided level. Instead, with this code,
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..0da2b36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging on the levels returned from `Levels()` by your
+// implementation of the interface. Note that hooks are not fired in a
+// goroutine or via a channel with workers; if a hook may block and you don't
+// want logging calls at those levels to block with it, you should handle that
+// concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
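+
+// An illustrative sketch of a Hook implementation (the type is hypothetical):
+//
+//  type ErrorReporterHook struct{}
+//
+//  func (ErrorReporterHook) Levels() []Level {
+//      return []Level{ErrorLevel, FatalLevel, PanicLevel}
+//  }
+//
+//  func (ErrorReporterHook) Fire(entry *Entry) error {
+//      // ship entry.Message and entry.Data to an error tracking service
+//      return nil
+//  }
+//
+// It is registered with `log.Hooks.Add(new(ErrorReporterHook))` (see Add below).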
+
+// Internal type for storing the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks levelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..b09227c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,26 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+type JSONFormatter struct{}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data)
+ data["time"] = entry.Time.Format(time.RFC3339)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b392e54
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or to leave it at the default, which is `os.Stdout`. You can also set
+ // this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks levelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter`, of which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but not when writing to a file. You can easily implement
+ // your own formatter by implementing the `Formatter` interface; see the
+ // `README` or the included formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.InfoLevel`, which allows Info(), Warn(), Error() and Fatal() to
+ // be logged. `logrus.DebugLevel` is useful when debugging.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(levelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stdout,
+ Formatter: new(TextFormatter),
+ Hooks: make(levelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry; note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a map of fields to the log entry. All it does is call `WithFields` on
+// a new Entry.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ NewEntry(logger).Debugf(format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ NewEntry(logger).Infof(format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ NewEntry(logger).Errorf(format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ NewEntry(logger).Fatalf(format, args...)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ NewEntry(logger).Panicf(format, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ NewEntry(logger).Debug(args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ NewEntry(logger).Error(args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ NewEntry(logger).Fatal(args...)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ NewEntry(logger).Panic(args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ NewEntry(logger).Debugln(args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ NewEntry(logger).Infoln(args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ NewEntry(logger).Errorln(args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ NewEntry(logger).Fatalln(args...)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ NewEntry(logger).Panicln(args...)
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..43ee12e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// These are the different logging levels. You can set the logging level on
+// your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
new file mode 100644
index 0000000..8fe02a4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
new file mode 100644
index 0000000..0428ee5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
@@ -0,0 +1,20 @@
+/*
+ Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+*/
+package logrus
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..a2c0b40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..276447b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..2e09f6f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go b/src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..78e7889
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,124 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+ noQuoteNeeded *regexp.Regexp
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+ DisableColors bool
+ // Set to true to disable timestamp logging (useful when the output
+ // is redirected to a logging system already adding a timestamp)
+ DisableTimestamp bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+
+ var keys []string
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+
+ if isColored {
+ printColored(b, entry, keys)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+ var levelColor int
+ switch entry.Level {
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+// needsQuoting reports whether text contains characters outside the safe set
+// (ASCII letters, digits, '-' and '.') and therefore has to be quoted.
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+  if !((ch >= 'a' && ch <= 'z') ||
+   (ch >= 'A' && ch <= 'Z') ||
+   (ch >= '0' && ch <= '9') ||
+   ch == '-' || ch == '.') {
+   return true
+  }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
+ switch value.(type) {
+ case string:
+  if needsQuoting(value.(string)) {
+   fmt.Fprintf(b, "%v=%q ", key, value)
+  } else {
+   fmt.Fprintf(b, "%v=%s ", key, value)
+  }
+ case error:
+  if needsQuoting(value.(error).Error()) {
+   fmt.Fprintf(b, "%v=%q ", key, value)
+  } else {
+   fmt.Fprintf(b, "%v=%s ", key, value)
+  }
+ default:
+  fmt.Fprintf(b, "%v=%v ", key, value)
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/beorn7/perks/LICENSE b/src/kube2msb/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..587b1fc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
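+//
+// Illustrative usage (a sketch; the target quantiles, epsilons and input are
+// arbitrary):
+//
+//  s := NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
+//  for _, v := range observations {
+//      s.Insert(v)
+//  }
+//  p99 := s.Query(0.99)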
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed value for the given quantile q. If s was created
+// with NewTargeted, and q is not in the set of quantiles provided a priori,
+// Query will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/src/kube2msb/vendor/github.com/blang/semver/LICENSE b/src/kube2msb/vendor/github.com/blang/semver/LICENSE
new file mode 100644
index 0000000..5ba5c86
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/src/kube2msb/vendor/github.com/blang/semver/README.md b/src/kube2msb/vendor/github.com/blang/semver/README.md
new file mode 100644
index 0000000..5171c5c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/README.md
@@ -0,0 +1,142 @@
+semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+======
+
+semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
+
+Usage
+-----
+```bash
+$ go get github.com/blang/semver
+```
+Note: Always vendor your dependencies or pin to a specific version tag.
+
+```go
+import "github.com/blang/semver"
+v1, err := semver.Make("1.0.0-beta")
+v2, err := semver.Make("2.0.0-beta")
+v1.Compare(v2)
+```
+
+Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
+
+Why should I use this lib?
+-----
+
+- Fully spec compatible
+- No reflection
+- No regex
+- Fully tested (Coverage >99%)
+- Readable parsing/validation errors
+- Fast (See [Benchmarks](#benchmarks))
+- Only Stdlib
+- Uses values instead of pointers
+- Many features, see below
+
+
+Features
+-----
+
+- Parsing and validation at all levels
+- Comparator-like comparisons
+- Compare Helper Methods
+- InPlace manipulation
+- Sortable (implements sort.Interface)
+- database/sql compatible (sql.Scanner/Valuer)
+- encoding/json compatible (json.Marshaler/Unmarshaler)
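+
+As a brief sketch of the encoding/json support listed above (assuming
+`encoding/json` and this package are imported; error handling omitted):
+
+```go
+v, _ := semver.Make("1.2.3-beta+build.5")
+data, _ := json.Marshal(v) // yields the JSON string "1.2.3-beta+build.5"
+
+var v2 semver.Version
+_ = json.Unmarshal(data, &v2) // v2.Equals(v) == true
+```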
+
+
+Example
+-----
+
+Have a look at full examples in [examples/main.go](examples/main.go)
+
+```go
+import "github.com/blang/semver"
+
+v, err := semver.Make("0.0.1-alpha.preview+123.github")
+fmt.Printf("Major: %d\n", v.Major)
+fmt.Printf("Minor: %d\n", v.Minor)
+fmt.Printf("Patch: %d\n", v.Patch)
+fmt.Printf("Pre: %s\n", v.Pre)
+fmt.Printf("Build: %s\n", v.Build)
+
+// Prerelease versions array
+if len(v.Pre) > 0 {
+ fmt.Println("Prerelease versions:")
+ for i, pre := range v.Pre {
+ fmt.Printf("%d: %q\n", i, pre)
+ }
+}
+
+// Build meta data array
+if len(v.Build) > 0 {
+ fmt.Println("Build meta data:")
+ for i, build := range v.Build {
+ fmt.Printf("%d: %q\n", i, build)
+ }
+}
+
+v001, err := semver.Make("0.0.1")
+// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
+v001.GT(v) == true
+v.LT(v001) == true
+v.GTE(v) == true
+v.LTE(v) == true
+
+// Or use v.Compare(v2) for comparisons (-1, 0, 1):
+v001.Compare(v) == 1
+v.Compare(v001) == -1
+v.Compare(v) == 0
+
+// Manipulate Version in place:
+v.Pre[0], err = semver.NewPRVersion("beta")
+if err != nil {
+ fmt.Printf("Error parsing pre release version: %q", err)
+}
+
+fmt.Println("\nValidate versions:")
+v.Build[0] = "?"
+
+err = v.Validate()
+if err != nil {
+ fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+Benchmarks
+-----
+
+ BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
+ BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
+ BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
+ BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
+ BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
+ BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
+ BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
+ BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
+ BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
+ BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
+ BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
+ BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regexes, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
+
+
+License
+-----
+
+See [LICENSE](LICENSE) file.
diff --git a/src/kube2msb/vendor/github.com/blang/semver/json.go b/src/kube2msb/vendor/github.com/blang/semver/json.go
new file mode 100644
index 0000000..a74bf7c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+ "encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(data []byte) (err error) {
+ var versionString string
+
+ if err = json.Unmarshal(data, &versionString); err != nil {
+ return
+ }
+
+ *v, err = Parse(versionString)
+
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/blang/semver/semver.go b/src/kube2msb/vendor/github.com/blang/semver/semver.go
new file mode 100644
index 0000000..bbf85ce
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/semver.go
@@ -0,0 +1,395 @@
+package semver
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ numbers string = "0123456789"
+ alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+ alphanum = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+ Major: 2,
+ Minor: 0,
+ Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+ Major uint64
+ Minor uint64
+ Patch uint64
+ Pre []PRVersion
+ Build []string // No precedence
+}
+
+// Version to string
+func (v Version) String() string {
+ b := make([]byte, 0, 5)
+ b = strconv.AppendUint(b, v.Major, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Minor, 10)
+ b = append(b, '.')
+ b = strconv.AppendUint(b, v.Patch, 10)
+
+ if len(v.Pre) > 0 {
+ b = append(b, '-')
+ b = append(b, v.Pre[0].String()...)
+
+ for _, pre := range v.Pre[1:] {
+ b = append(b, '.')
+ b = append(b, pre.String()...)
+ }
+ }
+
+ if len(v.Build) > 0 {
+ b = append(b, '+')
+ b = append(b, v.Build[0]...)
+
+ for _, build := range v.Build[1:] {
+ b = append(b, '.')
+ b = append(b, build...)
+ }
+ }
+
+ return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+ return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+ return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+ return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+ return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+ return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+ return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+ if v.Major != o.Major {
+ if v.Major > o.Major {
+ return 1
+ }
+ return -1
+ }
+ if v.Minor != o.Minor {
+ if v.Minor > o.Minor {
+ return 1
+ }
+ return -1
+ }
+ if v.Patch != o.Patch {
+ if v.Patch > o.Patch {
+ return 1
+ }
+ return -1
+ }
+
+ // Quick comparison if a version has no prerelease versions
+ if len(v.Pre) == 0 && len(o.Pre) == 0 {
+ return 0
+ } else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+ return 1
+ } else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+ return -1
+ }
+
+ i := 0
+ for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+ if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+ continue
+ } else if comp == 1 {
+ return 1
+ } else {
+ return -1
+ }
+ }
+
+ // If all prerelease versions are equal but one has additional prerelease versions, that one is greater
+ if i == len(v.Pre) && i == len(o.Pre) {
+ return 0
+ } else if i == len(v.Pre) && i < len(o.Pre) {
+ return -1
+ } else {
+ return 1
+ }
+
+}
+
+// Validate validates v and returns an error if v is an invalid version
+func (v Version) Validate() error {
+ // Major, Minor, Patch already validated using uint64
+
+ for _, pre := range v.Pre {
+ if !pre.IsNum { //Numeric prerelease versions already uint64
+ if len(pre.VersionStr) == 0 {
+ return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+ }
+ if !containsOnly(pre.VersionStr, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+ }
+ }
+ }
+
+ for _, build := range v.Build {
+ if len(build) == 0 {
+ return fmt.Errorf("Build meta data can not be empty %q", build)
+ }
+ if !containsOnly(build, alphanum) {
+ return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+ }
+ }
+
+ return nil
+}
+
+// New is an alias for Parse that returns a pointer: it parses the version string and returns a validated *Version or an error
+func New(s string) (vp *Version, err error) {
+ v, err := Parse(s)
+ vp = &v
+ return
+}
+
+// Make is an alias for Parse; it parses the version string and returns a validated Version or an error
+func Make(s string) (Version, error) {
+ return Parse(s)
+}
+
+// Parse parses the version string and returns a validated Version or an error
+func Parse(s string) (Version, error) {
+ if len(s) == 0 {
+ return Version{}, errors.New("Version string empty")
+ }
+
+ // Split into major.minor.(patch+pr+meta)
+ parts := strings.SplitN(s, ".", 3)
+ if len(parts) != 3 {
+ return Version{}, errors.New("No Major.Minor.Patch elements found")
+ }
+
+ // Major
+ if !containsOnly(parts[0], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+ }
+ if hasLeadingZeroes(parts[0]) {
+ return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+ }
+ major, err := strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ // Minor
+ if !containsOnly(parts[1], numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+ }
+ if hasLeadingZeroes(parts[1]) {
+ return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+ }
+ minor, err := strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v := Version{}
+ v.Major = major
+ v.Minor = minor
+
+ var build, prerelease []string
+ patchStr := parts[2]
+
+ if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+ build = strings.Split(patchStr[buildIndex+1:], ".")
+ patchStr = patchStr[:buildIndex]
+ }
+
+ if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+ prerelease = strings.Split(patchStr[preIndex+1:], ".")
+ patchStr = patchStr[:preIndex]
+ }
+
+ if !containsOnly(patchStr, numbers) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+ }
+ if hasLeadingZeroes(patchStr) {
+ return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+ }
+ patch, err := strconv.ParseUint(patchStr, 10, 64)
+ if err != nil {
+ return Version{}, err
+ }
+
+ v.Patch = patch
+
+ // Prerelease
+ for _, prstr := range prerelease {
+ parsedPR, err := NewPRVersion(prstr)
+ if err != nil {
+ return Version{}, err
+ }
+ v.Pre = append(v.Pre, parsedPR)
+ }
+
+ // Build meta data
+ for _, str := range build {
+ if len(str) == 0 {
+ return Version{}, errors.New("Build meta data is empty")
+ }
+ if !containsOnly(str, alphanum) {
+ return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+ }
+ v.Build = append(v.Build, str)
+ }
+
+ return v, nil
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(s string) Version {
+ v, err := Parse(s)
+ if err != nil {
+ panic(`semver: Parse(` + s + `): ` + err.Error())
+ }
+ return v
+}
+
+// PRVersion represents a PreRelease Version
+type PRVersion struct {
+ VersionStr string
+ VersionNum uint64
+ IsNum bool
+}
+
+// NewPRVersion creates a new valid prerelease version
+func NewPRVersion(s string) (PRVersion, error) {
+ if len(s) == 0 {
+ return PRVersion{}, errors.New("Prerelease is empty")
+ }
+ v := PRVersion{}
+ if containsOnly(s, numbers) {
+ if hasLeadingZeroes(s) {
+ return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
+ }
+ num, err := strconv.ParseUint(s, 10, 64)
+
+ // Might never be hit, but just in case
+ if err != nil {
+ return PRVersion{}, err
+ }
+ v.VersionNum = num
+ v.IsNum = true
+ } else if containsOnly(s, alphanum) {
+ v.VersionStr = s
+ v.IsNum = false
+ } else {
+ return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
+ }
+ return v, nil
+}
+
+// IsNumeric checks if the prerelease version is numeric
+func (v PRVersion) IsNumeric() bool {
+ return v.IsNum
+}
+
+// Compare compares two PreRelease Versions v and o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v PRVersion) Compare(o PRVersion) int {
+ if v.IsNum && !o.IsNum {
+ return -1
+ } else if !v.IsNum && o.IsNum {
+ return 1
+ } else if v.IsNum && o.IsNum {
+ if v.VersionNum == o.VersionNum {
+ return 0
+ } else if v.VersionNum > o.VersionNum {
+ return 1
+ } else {
+ return -1
+ }
+ } else { // both are Alphas
+ if v.VersionStr == o.VersionStr {
+ return 0
+ } else if v.VersionStr > o.VersionStr {
+ return 1
+ } else {
+ return -1
+ }
+ }
+}
+
+// PreRelease version to string
+func (v PRVersion) String() string {
+ if v.IsNum {
+ return strconv.FormatUint(v.VersionNum, 10)
+ }
+ return v.VersionStr
+}
+
+func containsOnly(s string, set string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(set, r)
+ }) == -1
+}
+
+func hasLeadingZeroes(s string) bool {
+ return len(s) > 1 && s[0] == '0'
+}
+
+// NewBuildVersion creates a new valid build version
+func NewBuildVersion(s string) (string, error) {
+ if len(s) == 0 {
+ return "", errors.New("Buildversion is empty")
+ }
+ if !containsOnly(s, alphanum) {
+ return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
+ }
+ return s, nil
+}
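
A minimal usage sketch for the Parse/Compare helpers added above, assuming the package is imported under its upstream path github.com/blang/semver:

    package main

    import (
        "fmt"

        "github.com/blang/semver"
    )

    func main() {
        // Parse a full version with prerelease and build metadata.
        v1 := semver.MustParse("1.2.3-alpha.1+build.5")
        v2, err := semver.Parse("1.2.4")
        if err != nil {
            panic(err)
        }
        // A prerelease sorts below the corresponding release.
        fmt.Println(v1.LT(v2))   // true
        fmt.Println(v1.String()) // 1.2.3-alpha.1+build.5
    }
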
diff --git a/src/kube2msb/vendor/github.com/blang/semver/sort.go b/src/kube2msb/vendor/github.com/blang/semver/sort.go
new file mode 100644
index 0000000..e18f880
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/sort.go
@@ -0,0 +1,28 @@
+package semver
+
+import (
+ "sort"
+)
+
+// Versions represents multiple versions.
+type Versions []Version
+
+// Len returns length of version collection
+func (s Versions) Len() int {
+ return len(s)
+}
+
+// Swap swaps two versions inside the collection by their indices
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less checks if version at index i is less than version at index j
+func (s Versions) Less(i, j int) bool {
+ return s[i].LT(s[j])
+}
+
+// Sort sorts a slice of versions
+func Sort(versions []Version) {
+ sort.Sort(Versions(versions))
+}
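
Sort builds directly on the comparison operators above; a short sketch, again assuming the github.com/blang/semver import:

    versions := []semver.Version{
        semver.MustParse("1.10.0"),
        semver.MustParse("1.2.0"),
        semver.MustParse("1.2.0-beta"),
    }
    // Ascending order afterwards: 1.2.0-beta, 1.2.0, 1.10.0 (numeric, not lexicographic).
    semver.Sort(versions)
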
diff --git a/src/kube2msb/vendor/github.com/blang/semver/sql.go b/src/kube2msb/vendor/github.com/blang/semver/sql.go
new file mode 100644
index 0000000..eb4d802
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/blang/semver/sql.go
@@ -0,0 +1,30 @@
+package semver
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements the database/sql.Scanner interface.
+func (v *Version) Scan(src interface{}) (err error) {
+ var str string
+ switch src := src.(type) {
+ case string:
+ str = src
+ case []byte:
+ str = string(src)
+ default:
+ return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
+ }
+
+ if t, err := Parse(str); err == nil {
+ *v = t
+ }
+
+ return
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
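
With Scan and Value in place, a Version can be written to and read back from a database column; a sketch assuming the same semver import:

    v := semver.MustParse("2.0.1")
    val, _ := v.Value() // driver.Value holding the string "2.0.1"

    var out semver.Version
    _ = out.Scan([]byte("3.1.4-rc.1")) // Scan accepts string or []byte
    // Note: in this vendored copy a failed Parse is silently ignored,
    // because the err returned by Parse shadows the named return value.
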
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE b/src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE b/src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go
new file mode 100644
index 0000000..fd079b4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/client.go
@@ -0,0 +1,7 @@
+package http
+
+import "net/http"
+
+type Client interface {
+ Do(*http.Request) (*http.Response, error)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go
new file mode 100644
index 0000000..f0d051b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/http.go
@@ -0,0 +1,159 @@
+package http
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+var (
+ log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
+)
+
+func WriteError(w http.ResponseWriter, code int, msg string) {
+ e := struct {
+ Error string `json:"error"`
+ }{
+ Error: msg,
+ }
+ b, err := json.Marshal(e)
+ if err != nil {
+ log.Errorf("Failed marshaling %#v to JSON: %v", e, err)
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(code)
+ w.Write(b)
+}
+
+// BasicAuth parses a username and password from the request's
+// Authorization header. This was pulled from golang master:
+// https://codereview.appspot.com/76540043
+func BasicAuth(r *http.Request) (username, password string, ok bool) {
+ auth := r.Header.Get("Authorization")
+ if auth == "" {
+ return
+ }
+
+ if !strings.HasPrefix(auth, "Basic ") {
+ return
+ }
+ c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
+ if err != nil {
+ return
+ }
+ cs := string(c)
+ s := strings.IndexByte(cs, ':')
+ if s < 0 {
+ return
+ }
+ return cs[:s], cs[s+1:], true
+}
+
+func cacheControlMaxAge(hdr string) (time.Duration, bool, error) {
+ for _, field := range strings.Split(hdr, ",") {
+ parts := strings.SplitN(strings.TrimSpace(field), "=", 2)
+ k := strings.ToLower(strings.TrimSpace(parts[0]))
+ if k != "max-age" {
+ continue
+ }
+
+ if len(parts) == 1 {
+ return 0, false, errors.New("max-age has no value")
+ }
+
+ v := strings.TrimSpace(parts[1])
+ if v == "" {
+ return 0, false, errors.New("max-age has empty value")
+ }
+
+ age, err := strconv.Atoi(v)
+ if err != nil {
+ return 0, false, err
+ }
+
+ if age <= 0 {
+ return 0, false, nil
+ }
+
+ return time.Duration(age) * time.Second, true, nil
+ }
+
+ return 0, false, nil
+}
+
+func expires(date, expires string) (time.Duration, bool, error) {
+ if date == "" || expires == "" {
+ return 0, false, nil
+ }
+
+ te, err := time.Parse(time.RFC1123, expires)
+ if err != nil {
+ return 0, false, err
+ }
+
+ td, err := time.Parse(time.RFC1123, date)
+ if err != nil {
+ return 0, false, err
+ }
+
+ ttl := te.Sub(td)
+
+ // headers indicate data already expired, caller should not
+ // have to care about this case
+ if ttl <= 0 {
+ return 0, false, nil
+ }
+
+ return ttl, true, nil
+}
+
+func Cacheable(hdr http.Header) (time.Duration, bool, error) {
+ ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control"))
+ if err != nil || ok {
+ return ttl, ok, err
+ }
+
+ return expires(hdr.Get("Date"), hdr.Get("Expires"))
+}
+
+// MergeQuery appends additional query values to an existing URL.
+func MergeQuery(u url.URL, q url.Values) url.URL {
+ uv := u.Query()
+ for k, vs := range q {
+ for _, v := range vs {
+ uv.Add(k, v)
+ }
+ }
+ u.RawQuery = uv.Encode()
+ return u
+}
+
+// NewResourceLocation appends a resource id to the end of the requested URL path.
+func NewResourceLocation(reqURL *url.URL, id string) string {
+ var u url.URL
+ u = *reqURL
+ u.Path = path.Join(u.Path, id)
+ u.RawQuery = ""
+ u.Fragment = ""
+ return u.String()
+}
+
+// CopyRequest returns a clone of the provided *http.Request.
+// The returned object is a shallow copy of the struct and a
+// deep copy of its Header field.
+func CopyRequest(r *http.Request) *http.Request {
+ r2 := *r
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return &r2
+}
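
A brief sketch of the cache and URL helpers above, assuming fmt, net/http, net/url and the vendored package imported as phttp (as the oauth2 package further below does); the URL is illustrative:

    hdr := http.Header{}
    hdr.Set("Cache-Control", "public, max-age=300")
    ttl, ok, _ := phttp.Cacheable(hdr)
    fmt.Println(ttl, ok) // 5m0s true

    u, _ := url.Parse("https://idp.example.com/authorize?foo=1")
    merged := phttp.MergeQuery(*u, url.Values{"bar": {"2"}})
    fmt.Println(merged.RawQuery) // bar=2&foo=1 (Encode sorts the keys)
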
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go
new file mode 100644
index 0000000..270b3bc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/middleware.go
@@ -0,0 +1,14 @@
+package http
+
+import (
+ "net/http"
+)
+
+type LoggingMiddleware struct {
+ Next http.Handler
+}
+
+func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ log.Infof("HTTP %s %v", r.Method, r.URL)
+ l.Next.ServeHTTP(w, r)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go
new file mode 100644
index 0000000..df60eb1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/http/url.go
@@ -0,0 +1,29 @@
+package http
+
+import (
+ "errors"
+ "net/url"
+)
+
+// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty
+// since `url.Parse("")` does not return an error. The URL must contain a scheme and a host.
+func ParseNonEmptyURL(u string) (*url.URL, error) {
+ if u == "" {
+ return nil, errors.New("url is empty")
+ }
+
+ ur, err := url.Parse(u)
+ if err != nil {
+ return nil, err
+ }
+
+ if ur.Scheme == "" {
+ return nil, errors.New("url scheme is empty")
+ }
+
+ if ur.Host == "" {
+ return nil, errors.New("url host is empty")
+ }
+
+ return ur, nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go
new file mode 100644
index 0000000..8b48bfd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/claims.go
@@ -0,0 +1,126 @@
+package jose
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "time"
+)
+
+type Claims map[string]interface{}
+
+func (c Claims) Add(name string, value interface{}) {
+ c[name] = value
+}
+
+func (c Claims) StringClaim(name string) (string, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return "", false, nil
+ }
+
+ v, ok := cl.(string)
+ if !ok {
+ return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
+ }
+
+ return v, true, nil
+}
+
+func (c Claims) StringsClaim(name string) ([]string, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return nil, false, nil
+ }
+
+ if v, ok := cl.([]string); ok {
+ return v, true, nil
+ }
+
+ // When unmarshaled, []string will become []interface{}.
+ if v, ok := cl.([]interface{}); ok {
+ var ret []string
+ for _, vv := range v {
+ str, ok := vv.(string)
+ if !ok {
+ return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
+ }
+ ret = append(ret, str)
+ }
+ return ret, true, nil
+ }
+
+ return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
+}
+
+func (c Claims) Int64Claim(name string) (int64, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return 0, false, nil
+ }
+
+ v, ok := cl.(int64)
+ if !ok {
+ vf, ok := cl.(float64)
+ if !ok {
+ return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name)
+ }
+ v = int64(vf)
+ }
+
+ return v, true, nil
+}
+
+func (c Claims) Float64Claim(name string) (float64, bool, error) {
+ cl, ok := c[name]
+ if !ok {
+ return 0, false, nil
+ }
+
+ v, ok := cl.(float64)
+ if !ok {
+ vi, ok := cl.(int64)
+ if !ok {
+ return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
+ }
+ v = float64(vi)
+ }
+
+ return v, true, nil
+}
+
+func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
+ v, ok, err := c.Float64Claim(name)
+ if !ok || err != nil {
+ return time.Time{}, ok, err
+ }
+
+ s := math.Trunc(v)
+ ns := (v - s) * math.Pow(10, 9)
+ return time.Unix(int64(s), int64(ns)).UTC(), true, nil
+}
+
+func decodeClaims(payload []byte) (Claims, error) {
+ var c Claims
+ if err := json.Unmarshal(payload, &c); err != nil {
+ return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err)
+ }
+ return c, nil
+}
+
+func marshalClaims(c Claims) ([]byte, error) {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func encodeClaims(c Claims) (string, error) {
+ b, err := marshalClaims(c)
+ if err != nil {
+ return "", err
+ }
+
+ return encodeSegment(b), nil
+}
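
The typed accessors above tolerate the way encoding/json decodes numbers; a small sketch assuming fmt and the vendored import path github.com/coreos/go-oidc/jose:

    claims := jose.Claims{}
    claims.Add("sub", "user-123")
    claims.Add("exp", float64(1700000000)) // JSON numbers arrive as float64

    sub, ok, _ := claims.StringClaim("sub") // "user-123", true
    exp, ok, _ := claims.TimeClaim("exp")   // time.Unix(1700000000, 0).UTC(), true
    fmt.Println(sub, exp, ok)
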
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go
new file mode 100644
index 0000000..6209926
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jose.go
@@ -0,0 +1,112 @@
+package jose
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const (
+ HeaderMediaType = "typ"
+ HeaderKeyAlgorithm = "alg"
+ HeaderKeyID = "kid"
+)
+
+const (
+ // Encryption Algorithm Header Parameter Values for JWS
+ // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
+ AlgHS256 = "HS256"
+ AlgHS384 = "HS384"
+ AlgHS512 = "HS512"
+ AlgRS256 = "RS256"
+ AlgRS384 = "RS384"
+ AlgRS512 = "RS512"
+ AlgES256 = "ES256"
+ AlgES384 = "ES384"
+ AlgES512 = "ES512"
+ AlgPS256 = "PS256"
+ AlgPS384 = "PS384"
+ AlgPS512 = "PS512"
+ AlgNone = "none"
+)
+
+const (
+ // Algorithm Header Parameter Values for JWE
+ // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
+ AlgRSA15 = "RSA1_5"
+ AlgRSAOAEP = "RSA-OAEP"
+ AlgRSAOAEP256 = "RSA-OAEP-256"
+ AlgA128KW = "A128KW"
+ AlgA192KW = "A192KW"
+ AlgA256KW = "A256KW"
+ AlgDir = "dir"
+ AlgECDHES = "ECDH-ES"
+ AlgECDHESA128KW = "ECDH-ES+A128KW"
+ AlgECDHESA192KW = "ECDH-ES+A192KW"
+ AlgECDHESA256KW = "ECDH-ES+A256KW"
+ AlgA128GCMKW = "A128GCMKW"
+ AlgA192GCMKW = "A192GCMKW"
+ AlgA256GCMKW = "A256GCMKW"
+ AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
+ AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
+ AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
+)
+
+const (
+ // Encryption Algorithm Header Parameter Values for JWE
+ // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
+ EncA128CBCHS256 = "A128CBC-HS256"
+ EncA128CBCHS384 = "A128CBC-HS384"
+ EncA256CBCHS512 = "A256CBC-HS512"
+ EncA128GCM = "A128GCM"
+ EncA192GCM = "A192GCM"
+ EncA256GCM = "A256GCM"
+)
+
+type JOSEHeader map[string]string
+
+func (j JOSEHeader) Validate() error {
+ if _, exists := j[HeaderKeyAlgorithm]; !exists {
+ return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm)
+ }
+
+ return nil
+}
+
+func decodeHeader(seg string) (JOSEHeader, error) {
+ b, err := decodeSegment(seg)
+ if err != nil {
+ return nil, err
+ }
+
+ var h JOSEHeader
+ err = json.Unmarshal(b, &h)
+ if err != nil {
+ return nil, err
+ }
+
+ return h, nil
+}
+
+func encodeHeader(h JOSEHeader) (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+
+ return encodeSegment(b), nil
+}
+
+// Decode JWT specific base64url encoding with padding stripped
+func decodeSegment(seg string) ([]byte, error) {
+ if l := len(seg) % 4; l != 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ return base64.URLEncoding.DecodeString(seg)
+}
+
+// Encode JWT specific base64url encoding with padding stripped
+func encodeSegment(seg []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
+}
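
A short sketch of the header type above; Validate only insists on the alg parameter being present (same jose import assumed, key id is illustrative):

    header := jose.JOSEHeader{
        jose.HeaderKeyAlgorithm: jose.AlgRS256,
        jose.HeaderKeyID:        "key-1",
    }
    fmt.Println(header.Validate())            // <nil>
    fmt.Println(jose.JOSEHeader{}.Validate()) // header missing "alg" parameter
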
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go
new file mode 100644
index 0000000..b7a8e23
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwk.go
@@ -0,0 +1,135 @@
+package jose
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/json"
+ "math/big"
+ "strings"
+)
+
+// JSON Web Key
+// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5
+type JWK struct {
+ ID string
+ Type string
+ Alg string
+ Use string
+ Exponent int
+ Modulus *big.Int
+ Secret []byte
+}
+
+type jwkJSON struct {
+ ID string `json:"kid"`
+ Type string `json:"kty"`
+ Alg string `json:"alg"`
+ Use string `json:"use"`
+ Exponent string `json:"e"`
+ Modulus string `json:"n"`
+}
+
+func (j *JWK) MarshalJSON() ([]byte, error) {
+ t := jwkJSON{
+ ID: j.ID,
+ Type: j.Type,
+ Alg: j.Alg,
+ Use: j.Use,
+ Exponent: encodeExponent(j.Exponent),
+ Modulus: encodeModulus(j.Modulus),
+ }
+
+ return json.Marshal(&t)
+}
+
+func (j *JWK) UnmarshalJSON(data []byte) error {
+ var t jwkJSON
+ err := json.Unmarshal(data, &t)
+ if err != nil {
+ return err
+ }
+
+ e, err := decodeExponent(t.Exponent)
+ if err != nil {
+ return err
+ }
+
+ n, err := decodeModulus(t.Modulus)
+ if err != nil {
+ return err
+ }
+
+ j.ID = t.ID
+ j.Type = t.Type
+ j.Alg = t.Alg
+ j.Use = t.Use
+ j.Exponent = e
+ j.Modulus = n
+
+ return nil
+}
+
+type JWKSet struct {
+ Keys []JWK `json:"keys"`
+}
+
+func decodeExponent(e string) (int, error) {
+ decE, err := decodeBase64URLPaddingOptional(e)
+ if err != nil {
+ return 0, err
+ }
+ var eBytes []byte
+ if len(decE) < 8 {
+ eBytes = make([]byte, 8-len(decE), 8)
+ eBytes = append(eBytes, decE...)
+ } else {
+ eBytes = decE
+ }
+ eReader := bytes.NewReader(eBytes)
+ var E uint64
+ err = binary.Read(eReader, binary.BigEndian, &E)
+ if err != nil {
+ return 0, err
+ }
+ return int(E), nil
+}
+
+func encodeExponent(e int) string {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(e))
+ var idx int
+ for ; idx < 8; idx++ {
+ if b[idx] != 0x0 {
+ break
+ }
+ }
+ return base64.URLEncoding.EncodeToString(b[idx:])
+}
+
+// Turns a URL encoded modulus of a key into a big int.
+func decodeModulus(n string) (*big.Int, error) {
+ decN, err := decodeBase64URLPaddingOptional(n)
+ if err != nil {
+ return nil, err
+ }
+ N := big.NewInt(0)
+ N.SetBytes(decN)
+ return N, nil
+}
+
+func encodeModulus(n *big.Int) string {
+ return base64.URLEncoding.EncodeToString(n.Bytes())
+}
+
+// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
+// The stdlib version currently doesn't handle this.
+// We can get rid of this if this bug:
+// https://github.com/golang/go/issues/4237
+// ever closes.
+func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
+ if m := len(e) % 4; m != 0 {
+ e += strings.Repeat("=", 4-m)
+ }
+ return base64.URLEncoding.DecodeString(e)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go
new file mode 100644
index 0000000..1049ece
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jws.go
@@ -0,0 +1,51 @@
+package jose
+
+import (
+ "fmt"
+ "strings"
+)
+
+type JWS struct {
+ RawHeader string
+ Header JOSEHeader
+ RawPayload string
+ Payload []byte
+ Signature []byte
+}
+
+// ParseJWS parses a raw encoded JWS token and verifies its structure.
+func ParseJWS(raw string) (JWS, error) {
+ parts := strings.Split(raw, ".")
+ if len(parts) != 3 {
+ return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
+ }
+
+ rawSig := parts[2]
+ jws := JWS{
+ RawHeader: parts[0],
+ RawPayload: parts[1],
+ }
+
+ header, err := decodeHeader(jws.RawHeader)
+ if err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
+ }
+ if err = header.Validate(); err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, %s", err)
+ }
+ jws.Header = header
+
+ payload, err := decodeSegment(jws.RawPayload)
+ if err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
+ }
+ jws.Payload = payload
+
+ sig, err := decodeSegment(rawSig)
+ if err != nil {
+ return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
+ }
+ jws.Signature = sig
+
+ return jws, nil
+}
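
ParseJWS only checks the three-segment structure and decodes each part; it does not verify the signature. A sketch with a hand-built token ({"alg":"HS256"} header, {"sub":"user-123"} payload, the literal bytes "sig" as the signature):

    raw := "eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1c2VyLTEyMyJ9.c2ln"
    jws, err := jose.ParseJWS(raw)
    if err != nil {
        panic(err)
    }
    fmt.Println(jws.Header[jose.HeaderKeyAlgorithm]) // HS256
    fmt.Println(string(jws.Payload))                 // {"sub":"user-123"}
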
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go
new file mode 100644
index 0000000..3b3e963
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/jwt.go
@@ -0,0 +1,82 @@
+package jose
+
+import "strings"
+
+type JWT JWS
+
+func ParseJWT(token string) (jwt JWT, err error) {
+ jws, err := ParseJWS(token)
+ if err != nil {
+ return
+ }
+
+ return JWT(jws), nil
+}
+
+func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
+ jwt = JWT{}
+
+ jwt.Header = header
+ jwt.Header[HeaderMediaType] = "JWT"
+
+ claimBytes, err := marshalClaims(claims)
+ if err != nil {
+ return
+ }
+ jwt.Payload = claimBytes
+
+ eh, err := encodeHeader(header)
+ if err != nil {
+ return
+ }
+ jwt.RawHeader = eh
+
+ ec, err := encodeClaims(claims)
+ if err != nil {
+ return
+ }
+ jwt.RawPayload = ec
+
+ return
+}
+
+func (j *JWT) KeyID() (string, bool) {
+ kID, ok := j.Header[HeaderKeyID]
+ return kID, ok
+}
+
+func (j *JWT) Claims() (Claims, error) {
+ return decodeClaims(j.Payload)
+}
+
+// Data returns the encoded data part of the token, which may be signed.
+func (j *JWT) Data() string {
+ return strings.Join([]string{j.RawHeader, j.RawPayload}, ".")
+}
+
+// Encode returns the full encoded JWT token string in the format header.claims.signature
+func (j *JWT) Encode() string {
+ d := j.Data()
+ s := encodeSegment(j.Signature)
+ return strings.Join([]string{d, s}, ".")
+}
+
+func NewSignedJWT(claims Claims, s Signer) (*JWT, error) {
+ header := JOSEHeader{
+ HeaderKeyAlgorithm: s.Alg(),
+ HeaderKeyID: s.ID(),
+ }
+
+ jwt, err := NewJWT(header, claims)
+ if err != nil {
+ return nil, err
+ }
+
+ sig, err := s.Sign([]byte(jwt.Data()))
+ if err != nil {
+ return nil, err
+ }
+ jwt.Signature = sig
+
+ return &jwt, nil
+}
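
NewJWT stamps the typ header and pre-encodes both segments; Encode appends the (possibly empty) signature. A sketch using the same jose import:

    claims := jose.Claims{"sub": "user-123"}
    header := jose.JOSEHeader{jose.HeaderKeyAlgorithm: jose.AlgNone}

    jwt, err := jose.NewJWT(header, claims)
    if err != nil {
        panic(err)
    }
    fmt.Println(jwt.Data())   // header.payload, the portion a Signer would sign
    fmt.Println(jwt.Encode()) // header.payload. (unsigned, so the signature segment is empty)
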
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go
new file mode 100644
index 0000000..7b2b253
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig.go
@@ -0,0 +1,24 @@
+package jose
+
+import (
+ "fmt"
+)
+
+type Verifier interface {
+ ID() string
+ Alg() string
+ Verify(sig []byte, data []byte) error
+}
+
+type Signer interface {
+ Verifier
+ Sign(data []byte) (sig []byte, err error)
+}
+
+func NewVerifier(jwk JWK) (Verifier, error) {
+ if jwk.Type != "RSA" {
+ return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
+ }
+
+ return NewVerifierRSA(jwk)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go
new file mode 100644
index 0000000..b3ca3ef
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go
@@ -0,0 +1,67 @@
+package jose
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/hmac"
+ _ "crypto/sha256"
+ "errors"
+ "fmt"
+)
+
+type VerifierHMAC struct {
+ KeyID string
+ Hash crypto.Hash
+ Secret []byte
+}
+
+type SignerHMAC struct {
+ VerifierHMAC
+}
+
+func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
+ if jwk.Alg != "" && jwk.Alg != "HS256" {
+ return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
+ }
+
+ v := VerifierHMAC{
+ KeyID: jwk.ID,
+ Secret: jwk.Secret,
+ Hash: crypto.SHA256,
+ }
+
+ return &v, nil
+}
+
+func (v *VerifierHMAC) ID() string {
+ return v.KeyID
+}
+
+func (v *VerifierHMAC) Alg() string {
+ return "HS256"
+}
+
+func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
+ h := hmac.New(v.Hash.New, v.Secret)
+ h.Write(data)
+ if !bytes.Equal(sig, h.Sum(nil)) {
+ return errors.New("invalid hmac signature")
+ }
+ return nil
+}
+
+func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
+ return &SignerHMAC{
+ VerifierHMAC: VerifierHMAC{
+ KeyID: kid,
+ Secret: secret,
+ Hash: crypto.SHA256,
+ },
+ }
+}
+
+func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
+ h := hmac.New(s.Hash.New, s.Secret)
+ h.Write(data)
+ return h.Sum(nil), nil
+}
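
Combining the HMAC signer with NewSignedJWT from jwt.go gives a complete sign-and-verify roundtrip; a sketch with an illustrative key id and secret:

    signer := jose.NewSignerHMAC("hmac-key-1", []byte("shared-secret"))

    jwt, err := jose.NewSignedJWT(jose.Claims{"sub": "user-123"}, signer)
    if err != nil {
        panic(err)
    }
    // The signer embeds VerifierHMAC, so it can also check the signature it produced.
    fmt.Println(signer.Verify(jwt.Signature, []byte(jwt.Data()))) // <nil>
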
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go
new file mode 100644
index 0000000..004e45d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go
@@ -0,0 +1,67 @@
+package jose
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+)
+
+type VerifierRSA struct {
+ KeyID string
+ Hash crypto.Hash
+ PublicKey rsa.PublicKey
+}
+
+type SignerRSA struct {
+ PrivateKey rsa.PrivateKey
+ VerifierRSA
+}
+
+func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
+ if jwk.Alg != "" && jwk.Alg != "RS256" {
+ return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
+ }
+
+ v := VerifierRSA{
+ KeyID: jwk.ID,
+ PublicKey: rsa.PublicKey{
+ N: jwk.Modulus,
+ E: jwk.Exponent,
+ },
+ Hash: crypto.SHA256,
+ }
+
+ return &v, nil
+}
+
+func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA {
+ return &SignerRSA{
+ PrivateKey: key,
+ VerifierRSA: VerifierRSA{
+ KeyID: kid,
+ PublicKey: key.PublicKey,
+ Hash: crypto.SHA256,
+ },
+ }
+}
+
+func (v *VerifierRSA) ID() string {
+ return v.KeyID
+}
+
+func (v *VerifierRSA) Alg() string {
+ return "RS256"
+}
+
+func (v *VerifierRSA) Verify(sig []byte, data []byte) error {
+ h := v.Hash.New()
+ h.Write(data)
+ return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig)
+}
+
+func (s *SignerRSA) Sign(data []byte) ([]byte, error) {
+ h := s.Hash.New()
+ h.Write(data)
+ return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil))
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go
new file mode 100644
index 0000000..d0142a9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/key.go
@@ -0,0 +1,153 @@
+package key
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/base64"
+ "encoding/json"
+ "math/big"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+)
+
+func NewPublicKey(jwk jose.JWK) *PublicKey {
+ return &PublicKey{jwk: jwk}
+}
+
+type PublicKey struct {
+ jwk jose.JWK
+}
+
+func (k *PublicKey) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&k.jwk)
+}
+
+func (k *PublicKey) UnmarshalJSON(data []byte) error {
+ var jwk jose.JWK
+ if err := json.Unmarshal(data, &jwk); err != nil {
+ return err
+ }
+ k.jwk = jwk
+ return nil
+}
+
+func (k *PublicKey) ID() string {
+ return k.jwk.ID
+}
+
+func (k *PublicKey) Verifier() (jose.Verifier, error) {
+ return jose.NewVerifierRSA(k.jwk)
+}
+
+type PrivateKey struct {
+ KeyID string
+ PrivateKey *rsa.PrivateKey
+}
+
+func (k *PrivateKey) ID() string {
+ return k.KeyID
+}
+
+func (k *PrivateKey) Signer() jose.Signer {
+ return jose.NewSignerRSA(k.ID(), *k.PrivateKey)
+}
+
+func (k *PrivateKey) JWK() jose.JWK {
+ return jose.JWK{
+ ID: k.KeyID,
+ Type: "RSA",
+ Alg: "RS256",
+ Use: "sig",
+ Exponent: k.PrivateKey.PublicKey.E,
+ Modulus: k.PrivateKey.PublicKey.N,
+ }
+}
+
+type KeySet interface {
+ ExpiresAt() time.Time
+}
+
+type PublicKeySet struct {
+ keys []PublicKey
+ index map[string]*PublicKey
+ expiresAt time.Time
+}
+
+func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet {
+ keys := make([]PublicKey, len(jwks))
+ index := make(map[string]*PublicKey)
+ for i, jwk := range jwks {
+ keys[i] = *NewPublicKey(jwk)
+ index[keys[i].ID()] = &keys[i]
+ }
+ return &PublicKeySet{
+ keys: keys,
+ index: index,
+ expiresAt: exp,
+ }
+}
+
+func (s *PublicKeySet) ExpiresAt() time.Time {
+ return s.expiresAt
+}
+
+func (s *PublicKeySet) Keys() []PublicKey {
+ return s.keys
+}
+
+func (s *PublicKeySet) Key(id string) *PublicKey {
+ return s.index[id]
+}
+
+type PrivateKeySet struct {
+ keys []*PrivateKey
+ ActiveKeyID string
+ expiresAt time.Time
+}
+
+func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet {
+ return &PrivateKeySet{
+ keys: keys,
+ ActiveKeyID: keys[0].ID(),
+ expiresAt: exp.UTC(),
+ }
+}
+
+func (s *PrivateKeySet) Keys() []*PrivateKey {
+ return s.keys
+}
+
+func (s *PrivateKeySet) ExpiresAt() time.Time {
+ return s.expiresAt
+}
+
+func (s *PrivateKeySet) Active() *PrivateKey {
+ for i, k := range s.keys {
+ if k.ID() == s.ActiveKeyID {
+ return s.keys[i]
+ }
+ }
+
+ return nil
+}
+
+type GeneratePrivateKeyFunc func() (*PrivateKey, error)
+
+func GeneratePrivateKey() (*PrivateKey, error) {
+ pk, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, err
+ }
+
+ k := PrivateKey{
+ KeyID: base64BigInt(pk.PublicKey.N),
+ PrivateKey: pk,
+ }
+
+ return &k, nil
+}
+
+func base64BigInt(b *big.Int) string {
+ return base64.URLEncoding.EncodeToString(b.Bytes())
+}
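
A sketch tying the private and public halves together, assuming fmt, time and the vendored import paths github.com/coreos/go-oidc/key and github.com/coreos/go-oidc/jose:

    priv, err := key.GeneratePrivateKey() // 2048-bit RSA key with a derived key id
    if err != nil {
        panic(err)
    }

    // Publish the public half as a key set that expires in an hour.
    set := key.NewPublicKeySet([]jose.JWK{priv.JWK()}, time.Now().Add(time.Hour))
    fmt.Println(set.Key(priv.ID()) != nil) // true
    fmt.Println(set.ExpiresAt())
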
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go
new file mode 100644
index 0000000..476ab6a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/manager.go
@@ -0,0 +1,99 @@
+package key
+
+import (
+ "errors"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/pkg/health"
+)
+
+type PrivateKeyManager interface {
+ ExpiresAt() time.Time
+ Signer() (jose.Signer, error)
+ JWKs() ([]jose.JWK, error)
+ PublicKeys() ([]PublicKey, error)
+
+ WritableKeySetRepo
+ health.Checkable
+}
+
+func NewPrivateKeyManager() PrivateKeyManager {
+ return &privateKeyManager{
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+type privateKeyManager struct {
+ keySet *PrivateKeySet
+ clock clockwork.Clock
+}
+
+func (m *privateKeyManager) ExpiresAt() time.Time {
+ if m.keySet == nil {
+ return m.clock.Now().UTC()
+ }
+
+ return m.keySet.ExpiresAt()
+}
+
+func (m *privateKeyManager) Signer() (jose.Signer, error) {
+ if err := m.Healthy(); err != nil {
+ return nil, err
+ }
+
+ return m.keySet.Active().Signer(), nil
+}
+
+func (m *privateKeyManager) JWKs() ([]jose.JWK, error) {
+ if err := m.Healthy(); err != nil {
+ return nil, err
+ }
+
+ keys := m.keySet.Keys()
+ jwks := make([]jose.JWK, len(keys))
+ for i, k := range keys {
+ jwks[i] = k.JWK()
+ }
+ return jwks, nil
+}
+
+func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) {
+ jwks, err := m.JWKs()
+ if err != nil {
+ return nil, err
+ }
+ keys := make([]PublicKey, len(jwks))
+ for i, jwk := range jwks {
+ keys[i] = *NewPublicKey(jwk)
+ }
+ return keys, nil
+}
+
+func (m *privateKeyManager) Healthy() error {
+ if m.keySet == nil {
+ return errors.New("private key manager uninitialized")
+ }
+
+ if len(m.keySet.Keys()) == 0 {
+ return errors.New("private key manager zero keys")
+ }
+
+ if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) {
+ return errors.New("private key manager keys expired")
+ }
+
+ return nil
+}
+
+func (m *privateKeyManager) Set(keySet KeySet) error {
+ privKeySet, ok := keySet.(*PrivateKeySet)
+ if !ok {
+ return errors.New("unable to cast to PrivateKeySet")
+ }
+
+ m.keySet = privKeySet
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go
new file mode 100644
index 0000000..1acdeb3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/repo.go
@@ -0,0 +1,55 @@
+package key
+
+import (
+ "errors"
+ "sync"
+)
+
+var ErrorNoKeys = errors.New("no keys found")
+
+type WritableKeySetRepo interface {
+ Set(KeySet) error
+}
+
+type ReadableKeySetRepo interface {
+ Get() (KeySet, error)
+}
+
+type PrivateKeySetRepo interface {
+ WritableKeySetRepo
+ ReadableKeySetRepo
+}
+
+func NewPrivateKeySetRepo() PrivateKeySetRepo {
+ return &memPrivateKeySetRepo{}
+}
+
+type memPrivateKeySetRepo struct {
+ mu sync.RWMutex
+ pks PrivateKeySet
+}
+
+func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
+ pks, ok := ks.(*PrivateKeySet)
+ if !ok {
+ return errors.New("unable to cast to PrivateKeySet")
+ } else if pks == nil {
+ return errors.New("nil KeySet")
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.pks = *pks
+ return nil
+}
+
+func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.pks.keys == nil {
+ return nil, ErrorNoKeys
+ }
+ return KeySet(&r.pks), nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go
new file mode 100644
index 0000000..9c5508b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/rotate.go
@@ -0,0 +1,165 @@
+package key
+
+import (
+ "errors"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ ptime "github.com/coreos/pkg/timeutil"
+ "github.com/jonboulle/clockwork"
+)
+
+var (
+ log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "key")
+
+ ErrorPrivateKeysExpired = errors.New("private keys have expired")
+)
+
+func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
+ return &PrivateKeyRotator{
+ repo: repo,
+ ttl: ttl,
+
+ keep: 2,
+ generateKey: GeneratePrivateKey,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+type PrivateKeyRotator struct {
+ repo PrivateKeySetRepo
+ generateKey GeneratePrivateKeyFunc
+ clock clockwork.Clock
+ keep int
+ ttl time.Duration
+}
+
+func (r *PrivateKeyRotator) expiresAt() time.Time {
+ return r.clock.Now().UTC().Add(r.ttl)
+}
+
+func (r *PrivateKeyRotator) Healthy() error {
+ pks, err := r.privateKeySet()
+ if err != nil {
+ return err
+ }
+
+ if r.clock.Now().After(pks.ExpiresAt()) {
+ return ErrorPrivateKeysExpired
+ }
+
+ return nil
+}
+
+func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
+ ks, err := r.repo.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ pks, ok := ks.(*PrivateKeySet)
+ if !ok {
+ return nil, errors.New("unable to cast to PrivateKeySet")
+ }
+ return pks, nil
+}
+
+func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
+ pks, err := r.privateKeySet()
+ if err == ErrorNoKeys {
+ log.Infof("No keys in private key set; must rotate immediately")
+ return 0, nil
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ now := r.clock.Now()
+
+ // Ideally, we want to rotate after half the TTL has elapsed.
+ idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)
+
+ // If we are past the ideal rotation time, rotate immediately.
+ return max(0, idealRotationTime.Sub(now)), nil
+}
+
+func max(a, b time.Duration) time.Duration {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func (r *PrivateKeyRotator) Run() chan struct{} {
+ attempt := func() {
+ k, err := r.generateKey()
+ if err != nil {
+ log.Errorf("Failed generating signing key: %v", err)
+ return
+ }
+
+ exp := r.expiresAt()
+ if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil {
+ log.Errorf("Failed key rotation: %v", err)
+ return
+ }
+
+ log.Infof("Rotated signing keys: id=%s expiresAt=%s", k.ID(), exp)
+ }
+
+ stop := make(chan struct{})
+ go func() {
+ for {
+ var nextRotation time.Duration
+ var sleep time.Duration
+ var err error
+ for {
+ if nextRotation, err = r.nextRotation(); err == nil {
+ break
+ }
+ sleep = ptime.ExpBackoff(sleep, time.Minute)
+ log.Errorf("error getting nextRotation, retrying in %v: %v", sleep, err)
+ time.Sleep(sleep)
+ }
+
+ log.Infof("will rotate keys in %v", nextRotation)
+ select {
+ case <-r.clock.After(nextRotation):
+ attempt()
+ case <-stop:
+ return
+ }
+ }
+ }()
+
+ return stop
+}
+
+func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error {
+ ks, err := repo.Get()
+ if err != nil && err != ErrorNoKeys {
+ return err
+ }
+
+ var keys []*PrivateKey
+ if ks != nil {
+ pks, ok := ks.(*PrivateKeySet)
+ if !ok {
+ return errors.New("unable to cast to PrivateKeySet")
+ }
+ keys = pks.Keys()
+ }
+
+ keys = append([]*PrivateKey{k}, keys...)
+ if l := len(keys); l > keep {
+ keys = keys[0:keep]
+ }
+
+ nks := PrivateKeySet{
+ keys: keys,
+ ActiveKeyID: k.ID(),
+ expiresAt: exp,
+ }
+
+ return repo.Set(KeySet(&nks))
+}
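
A sketch of wiring the rotator to the in-memory repository from repo.go; with an empty repository the first rotation happens immediately, then roughly every half TTL (imports of time and the key package assumed):

    repo := key.NewPrivateKeySetRepo()
    rotator := key.NewPrivateKeyRotator(repo, 24*time.Hour)

    stop := rotator.Run() // background goroutine: generates and stores keys
    defer close(stop)     // closing the channel ends the rotation loop

    // Later, rotator.Healthy() reports whether unexpired keys are available;
    // right after Run returns, the first key may still be generating.
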
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go
new file mode 100644
index 0000000..e8d5d03
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/key/sync.go
@@ -0,0 +1,91 @@
+package key
+
+import (
+ "errors"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "github.com/coreos/pkg/timeutil"
+)
+
+func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
+ return &KeySetSyncer{
+ readable: r,
+ writable: w,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+type KeySetSyncer struct {
+ readable ReadableKeySetRepo
+ writable WritableKeySetRepo
+ clock clockwork.Clock
+}
+
+func (s *KeySetSyncer) Run() chan struct{} {
+ stop := make(chan struct{})
+ go func() {
+ var failing bool
+ var next time.Duration
+ for {
+ exp, err := syncKeySet(s.readable, s.writable, s.clock)
+ if err != nil || exp == 0 {
+ if !failing {
+ failing = true
+ next = time.Second
+ } else {
+ next = timeutil.ExpBackoff(next, time.Minute)
+ }
+ if exp == 0 {
+ log.Errorf("Synced to already expired key set, retrying in %v: %v", next, err)
+
+ } else {
+ log.Errorf("Failed syncing key set, retrying in %v: %v", next, err)
+ }
+ } else {
+ failing = false
+ next = exp / 2
+ log.Infof("Synced key set, checking again in %v", next)
+ }
+
+ select {
+ case <-s.clock.After(next):
+ continue
+ case <-stop:
+ return
+ }
+ }
+ }()
+
+ return stop
+}
+
+func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
+ return syncKeySet(r, w, clockwork.NewRealClock())
+}
+
+// syncKeySet copies the KeySet from r to w and returns the duration until the KeySet expires.
+// If the KeySet has already expired, it returns a zero duration.
+func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
+ var ks KeySet
+ ks, err = r.Get()
+ if err != nil {
+ return
+ }
+
+ if ks == nil {
+ err = errors.New("no source KeySet")
+ return
+ }
+
+ if err = w.Set(ks); err != nil {
+ return
+ }
+
+ now := clock.Now()
+ if ks.ExpiresAt().After(now) {
+ exp = ks.ExpiresAt().Sub(now)
+ }
+ return
+}
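
Sync is the one-shot form of the syncer's loop; a sketch that copies keys between two of the in-memory repositories (purely illustrative wiring, fmt and the key package assumed):

    source := key.NewPrivateKeySetRepo()
    cache := key.NewPrivateKeySetRepo()

    exp, err := key.Sync(source, cache)
    // With an empty source this returns ErrorNoKeys; once the source holds
    // keys, exp is the time remaining until the copied set expires.
    fmt.Println(exp, err)
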
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go
new file mode 100644
index 0000000..50d8909
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/error.go
@@ -0,0 +1,29 @@
+package oauth2
+
+const (
+ ErrorAccessDenied = "access_denied"
+ ErrorInvalidClient = "invalid_client"
+ ErrorInvalidGrant = "invalid_grant"
+ ErrorInvalidRequest = "invalid_request"
+ ErrorServerError = "server_error"
+ ErrorUnauthorizedClient = "unauthorized_client"
+ ErrorUnsupportedGrantType = "unsupported_grant_type"
+ ErrorUnsupportedResponseType = "unsupported_response_type"
+)
+
+type Error struct {
+ Type string `json:"error"`
+ Description string `json:"error_description,omitempty"`
+ State string `json:"state,omitempty"`
+}
+
+func (e *Error) Error() string {
+ if e.Description != "" {
+ return e.Type + ": " + e.Description
+ }
+ return e.Type
+}
+
+func NewError(typ string) *Error {
+ return &Error{Type: typ}
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go
new file mode 100644
index 0000000..1c68293
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go
@@ -0,0 +1,416 @@
+package oauth2
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ phttp "github.com/coreos/go-oidc/http"
+)
+
+// ResponseTypesEqual compares two response_type values. If either
+// contains a space, it is treated as an unordered list. For example,
+// comparing "code id_token" and "id_token code" would evaluate to true.
+func ResponseTypesEqual(r1, r2 string) bool {
+ if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
+ // fast route, no split needed
+ return r1 == r2
+ }
+
+ // split, sort, and compare
+ r1Fields := strings.Fields(r1)
+ r2Fields := strings.Fields(r2)
+ if len(r1Fields) != len(r2Fields) {
+ return false
+ }
+ sort.Strings(r1Fields)
+ sort.Strings(r2Fields)
+ for i, r1Field := range r1Fields {
+ if r1Field != r2Fields[i] {
+ return false
+ }
+ }
+ return true
+}
+
+const (
+ // OAuth2.0 response types registered by OIDC.
+ //
+ // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
+ ResponseTypeCode = "code"
+ ResponseTypeCodeIDToken = "code id_token"
+ ResponseTypeCodeIDTokenToken = "code id_token token"
+ ResponseTypeIDToken = "id_token"
+ ResponseTypeIDTokenToken = "id_token token"
+ ResponseTypeToken = "token"
+ ResponseTypeNone = "none"
+)
+
+const (
+ GrantTypeAuthCode = "authorization_code"
+ GrantTypeClientCreds = "client_credentials"
+ GrantTypeUserCreds = "password"
+ GrantTypeImplicit = "implicit"
+ GrantTypeRefreshToken = "refresh_token"
+
+ AuthMethodClientSecretPost = "client_secret_post"
+ AuthMethodClientSecretBasic = "client_secret_basic"
+ AuthMethodClientSecretJWT = "client_secret_jwt"
+ AuthMethodPrivateKeyJWT = "private_key_jwt"
+)
+
+type Config struct {
+ Credentials ClientCredentials
+ Scope []string
+ RedirectURL string
+ AuthURL string
+ TokenURL string
+
+ // Must be one of the AuthMethodXXX methods above. Right now, only
+ // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported.
+ AuthMethod string
+}
+
+type Client struct {
+ hc phttp.Client
+ creds ClientCredentials
+ scope []string
+ authURL *url.URL
+ redirectURL *url.URL
+ tokenURL *url.URL
+ authMethod string
+}
+
+type ClientCredentials struct {
+ ID string
+ Secret string
+}
+
+func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
+ if len(cfg.Credentials.ID) == 0 {
+ err = errors.New("missing client id")
+ return
+ }
+
+ if len(cfg.Credentials.Secret) == 0 {
+ err = errors.New("missing client secret")
+ return
+ }
+
+ if cfg.AuthMethod == "" {
+ cfg.AuthMethod = AuthMethodClientSecretBasic
+ } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic {
+ err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod)
+ return
+ }
+
+ au, err := phttp.ParseNonEmptyURL(cfg.AuthURL)
+ if err != nil {
+ return
+ }
+
+ tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL)
+ if err != nil {
+ return
+ }
+
+ // Allow empty redirect URL in the case where the client
+ // only needs to verify a given token.
+ ru, err := url.Parse(cfg.RedirectURL)
+ if err != nil {
+ return
+ }
+
+ c = &Client{
+ creds: cfg.Credentials,
+ scope: cfg.Scope,
+ redirectURL: ru,
+ authURL: au,
+ tokenURL: tu,
+ hc: hc,
+ authMethod: cfg.AuthMethod,
+ }
+
+ return
+}
+
+// HttpClient returns the embedded HTTP client.
+func (c *Client) HttpClient() phttp.Client {
+ return c.hc
+}
+
+// AuthCodeURL generates the URL for the initial redirect to the OAuth provider.
+func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
+ v := c.commonURLValues()
+ v.Set("state", state)
+ if strings.ToLower(accessType) == "offline" {
+ v.Set("access_type", "offline")
+ }
+
+ if prompt != "" {
+ v.Set("prompt", prompt)
+ }
+ v.Set("response_type", "code")
+
+ q := v.Encode()
+ u := *c.authURL
+ if u.RawQuery == "" {
+ u.RawQuery = q
+ } else {
+ u.RawQuery += "&" + q
+ }
+ return u.String()
+}
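+// An illustrative sketch; the state and prompt values below are hypothetical:
+//
+//   loginURL := cli.AuthCodeURL("opaque-state", "offline", "consent")
+//   // Redirect the user agent to loginURL to start the authorization code flow.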
+
+func (c *Client) commonURLValues() url.Values {
+ return url.Values{
+ "redirect_uri": {c.redirectURL.String()},
+ "scope": {strings.Join(c.scope, " ")},
+ "client_id": {c.creds.ID},
+ }
+}
+
+func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
+ var req *http.Request
+ var err error
+ switch c.authMethod {
+ case AuthMethodClientSecretPost:
+ values.Set("client_secret", c.creds.Secret)
+ req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ case AuthMethodClientSecretBasic:
+ req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ encodedID := url.QueryEscape(c.creds.ID)
+ encodedSecret := url.QueryEscape(c.creds.Secret)
+ req.SetBasicAuth(encodedID, encodedSecret)
+ default:
+ panic("misconfigured client: auth method not supported")
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return req, nil
+}
+
+// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type.
+// May not be supported by all OAuth2 servers.
+func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) {
+ v := url.Values{
+ "scope": {strings.Join(scope, " ")},
+ "grant_type": {GrantTypeClientCreds},
+ }
+
+ req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
+ if err != nil {
+ return
+ }
+
+ resp, err := c.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ return parseTokenResponse(resp)
+}
+
+// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant type.
+// May not be supported by all OAuth2 servers.
+func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
+ v := url.Values{
+ "scope": {strings.Join(c.scope, " ")},
+ "grant_type": {GrantTypeUserCreds},
+ "username": {username},
+ "password": {password},
+ }
+
+ req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
+ if err != nil {
+ return
+ }
+
+ resp, err := c.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ return parseTokenResponse(resp)
+}
+
+// RequestToken requests a token from the Token Endpoint with the specified grantType.
+// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
+// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.
+func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) {
+ v := c.commonURLValues()
+
+ v.Set("grant_type", grantType)
+ v.Set("client_secret", c.creds.Secret)
+ switch grantType {
+ case GrantTypeAuthCode:
+ v.Set("code", value)
+ case GrantTypeRefreshToken:
+ v.Set("refresh_token", value)
+ default:
+ err = fmt.Errorf("unsupported grant_type: %v", grantType)
+ return
+ }
+
+ req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
+ if err != nil {
+ return
+ }
+
+ resp, err := c.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ return parseTokenResponse(resp)
+}
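+// An illustrative sketch of redeeming an authorization code; the code value is
+// a hypothetical placeholder:
+//
+//   tok, err := cli.RequestToken(GrantTypeAuthCode, "received-auth-code")
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = tok.AccessToken // also tok.IDToken, tok.RefreshToken, tok.Expires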
+
+func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return
+ }
+ badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299
+
+ contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
+ if err != nil {
+ return
+ }
+
+ result = TokenResponse{
+ RawBody: body,
+ }
+
+ newError := func(typ, desc, state string) error {
+ if typ == "" {
+ return fmt.Errorf("unrecognized error %s", body)
+ }
+ return &Error{typ, desc, state}
+ }
+
+ if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" {
+ var vals url.Values
+ vals, err = url.ParseQuery(string(body))
+ if err != nil {
+ return
+ }
+ if errCode := vals.Get("error"); errCode != "" || badStatusCode {
+ err = newError(errCode, vals.Get("error_description"), vals.Get("state"))
+ return
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ e = vals.Get("expires")
+ }
+ if e != "" {
+ result.Expires, err = strconv.Atoi(e)
+ if err != nil {
+ return
+ }
+ }
+ result.AccessToken = vals.Get("access_token")
+ result.TokenType = vals.Get("token_type")
+ result.IDToken = vals.Get("id_token")
+ result.RefreshToken = vals.Get("refresh_token")
+ result.Scope = vals.Get("scope")
+ } else {
+ var r struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ RefreshToken string `json:"refresh_token"`
+ Scope string `json:"scope"`
+ State string `json:"state"`
+ ExpiresIn int `json:"expires_in"`
+ Expires int `json:"expires"`
+ Error string `json:"error"`
+ Desc string `json:"error_description"`
+ }
+ if err = json.Unmarshal(body, &r); err != nil {
+ return
+ }
+ if r.Error != "" || badStatusCode {
+ err = newError(r.Error, r.Desc, r.State)
+ return
+ }
+ result.AccessToken = r.AccessToken
+ result.TokenType = r.TokenType
+ result.IDToken = r.IDToken
+ result.RefreshToken = r.RefreshToken
+ result.Scope = r.Scope
+ if r.ExpiresIn == 0 {
+ result.Expires = r.Expires
+ } else {
+ result.Expires = r.ExpiresIn
+ }
+ }
+ return
+}
+
+type TokenResponse struct {
+ AccessToken string
+ TokenType string
+ Expires int
+ IDToken string
+ RefreshToken string // OPTIONAL.
+ Scope string // OPTIONAL, if identical to the scope requested by the client; otherwise, REQUIRED.
+ RawBody []byte // In case callers need some other non-standard info from the token response.
+}
+
+type AuthCodeRequest struct {
+ ResponseType string
+ ClientID string
+ RedirectURL *url.URL
+ Scope []string
+ State string
+}
+
+func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) {
+ acr := AuthCodeRequest{
+ ResponseType: q.Get("response_type"),
+ ClientID: q.Get("client_id"),
+ State: q.Get("state"),
+ Scope: make([]string, 0),
+ }
+
+ qs := strings.TrimSpace(q.Get("scope"))
+ if qs != "" {
+ acr.Scope = strings.Split(qs, " ")
+ }
+
+ err := func() error {
+ if acr.ClientID == "" {
+ return NewError(ErrorInvalidRequest)
+ }
+
+ redirectURL := q.Get("redirect_uri")
+ if redirectURL != "" {
+ ru, err := url.Parse(redirectURL)
+ if err != nil {
+ return NewError(ErrorInvalidRequest)
+ }
+ acr.RedirectURL = ru
+ }
+
+ return nil
+ }()
+
+ return acr, err
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go
new file mode 100644
index 0000000..7a3cb40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/client.go
@@ -0,0 +1,846 @@
+package oidc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/mail"
+ "net/url"
+ "sync"
+ "time"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/key"
+ "github.com/coreos/go-oidc/oauth2"
+)
+
+const (
+ // amount of time that must pass after the last key sync
+ // completes before another attempt may begin
+ keySyncWindow = 5 * time.Second
+)
+
+var (
+ DefaultScope = []string{"openid", "email", "profile"}
+
+ supportedAuthMethods = map[string]struct{}{
+ oauth2.AuthMethodClientSecretBasic: struct{}{},
+ oauth2.AuthMethodClientSecretPost: struct{}{},
+ }
+)
+
+type ClientCredentials oauth2.ClientCredentials
+
+type ClientIdentity struct {
+ Credentials ClientCredentials
+ Metadata ClientMetadata
+}
+
+type JWAOptions struct {
+ // SigningAlg specifies an JWA alg for signing JWTs.
+ //
+ // Specifying this field implies different actions depending on the context. It may
+ // require objects be serialized and signed as a JWT instead of plain JSON, or
+ // require an existing JWT object use the specified alg.
+ //
+ // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
+ SigningAlg string
+ // EncryptionAlg, if provided, specifies that the returned or sent object be stored
+ // (or nested) within a JWT object and encrypted with the provided JWA alg.
+ EncryptionAlg string
+ // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
+ // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
+ // to A128CBC-HS256.
+ //
+ // If EncryptionEnc is provided EncryptionAlg must also be specified.
+ EncryptionEnc string
+}
+
+func (opt JWAOptions) valid() error {
+ if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
+ return errors.New("encryption encoding provided with no encryption algorithm")
+ }
+ return nil
+}
+
+func (opt JWAOptions) defaults() JWAOptions {
+ if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
+ opt.EncryptionEnc = jose.EncA128CBCHS256
+ }
+ return opt
+}
+
+var (
+ // Ensure ClientMetadata satisfies these interfaces.
+ _ json.Marshaler = &ClientMetadata{}
+ _ json.Unmarshaler = &ClientMetadata{}
+)
+
+// ClientMetadata holds metadata that the authorization server associates
+// with a client identifier. The fields range from human-facing display
+// strings such as client name, to items that impact the security of the
+// protocol, such as the list of valid redirect URIs.
+//
+// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
+//
+// TODO: support language specific claim representations
+// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
+type ClientMetadata struct {
+ RedirectURIs []url.URL // Required
+
+ // A list of OAuth 2.0 "response_type" values that the client wishes to restrict
+ // itself to. Either "code", "token", or another registered extension.
+ //
+ // If omitted, only "code" will be used.
+ ResponseTypes []string
+ // A list of OAuth 2.0 grant types the client wishes to restrict itself to.
+ // The grant type values used by OIDC are "authorization_code", "implicit",
+ // and "refresh_token".
+ //
+ // If omitted, only "authorization_code" will be used.
+ GrantTypes []string
+ // "native" or "web". If omitted, "web".
+ ApplicationType string
+
+ // List of email addresses.
+ Contacts []mail.Address
+ // Name of client to be presented to the end-user.
+ ClientName string
+ // URL that references a logo for the Client application.
+ LogoURI *url.URL
+ // URL of the home page of the Client.
+ ClientURI *url.URL
+ // Profile data policies and terms of use to be provided to the end user.
+ PolicyURI *url.URL
+ TermsOfServiceURI *url.URL
+
+ // URL to or the value of the client's JSON Web Key Set document.
+ JWKSURI *url.URL
+ JWKS *jose.JWKSet
+
+ // URL referencing a file with a single JSON array of redirect URIs.
+ SectorIdentifierURI *url.URL
+
+ SubjectType string
+
+ // Options to restrict the JWS alg and enc values used for server responses and requests.
+ IDTokenResponseOptions JWAOptions
+ UserInfoResponseOptions JWAOptions
+ RequestObjectOptions JWAOptions
+
+ // Client requested authorization method and signing options for the token endpoint.
+ //
+ // Defaults to "client_secret_basic"
+ TokenEndpointAuthMethod string
+ TokenEndpointAuthSigningAlg string
+
+ // DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
+ // user must reauthorize.
+ //
+ // If 0, no limitation is placed on the maximum.
+ DefaultMaxAge int64
+ // RequireAuthTime specifies if the auth_time claim in the ID token is required.
+ RequireAuthTime bool
+
+ // Default Authentication Context Class Reference values for authentication requests.
+ DefaultACRValues []string
+
+ // URI that a third party can use to initiate a login by the relying party.
+ //
+ // See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
+ InitiateLoginURI *url.URL
+ // Pre-registered request_uri values that may be cached by the server.
+ RequestURIs []url.URL
+}
+
+// Defaults returns a shallow copy of ClientMetadata with default
+// values replacing omitted fields.
+func (m ClientMetadata) Defaults() ClientMetadata {
+ if len(m.ResponseTypes) == 0 {
+ m.ResponseTypes = []string{oauth2.ResponseTypeCode}
+ }
+ if len(m.GrantTypes) == 0 {
+ m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
+ }
+ if m.ApplicationType == "" {
+ m.ApplicationType = "web"
+ }
+ if m.TokenEndpointAuthMethod == "" {
+ m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
+ }
+ m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
+ m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
+ m.RequestObjectOptions = m.RequestObjectOptions.defaults()
+ return m
+}
+
+func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
+ e := m.toEncodableStruct()
+ return json.Marshal(&e)
+}
+
+func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
+ var e encodableClientMetadata
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+ meta, err := e.toStruct()
+ if err != nil {
+ return err
+ }
+ if err := meta.Valid(); err != nil {
+ return err
+ }
+ *m = meta
+ return nil
+}
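+// An illustrative decoding sketch; the JSON document below is hypothetical:
+//
+//   var meta ClientMetadata
+//   raw := []byte(`{"redirect_uris": ["https://app.example.com/callback"]}`)
+//   if err := json.Unmarshal(raw, &meta); err != nil {
+//       // rejected: Valid is enforced as part of unmarshaling
+//   }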
+
+type encodableClientMetadata struct {
+ RedirectURIs []string `json:"redirect_uris"` // Required
+ ResponseTypes []string `json:"response_types,omitempty"`
+ GrantTypes []string `json:"grant_types,omitempty"`
+ ApplicationType string `json:"application_type,omitempty"`
+ Contacts []string `json:"contacts,omitempty"`
+ ClientName string `json:"client_name,omitempty"`
+ LogoURI string `json:"logo_uri,omitempty"`
+ ClientURI string `json:"client_uri,omitempty"`
+ PolicyURI string `json:"policy_uri,omitempty"`
+ TermsOfServiceURI string `json:"tos_uri,omitempty"`
+ JWKSURI string `json:"jwks_uri,omitempty"`
+ JWKS *jose.JWKSet `json:"jwks,omitempty"`
+ SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
+ SubjectType string `json:"subject_type,omitempty"`
+ IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
+ IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
+ IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
+ UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
+ UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
+ UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
+ RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
+ RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
+ RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
+ TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
+ TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
+ DefaultMaxAge int64 `json:"default_max_age,omitempty"`
+ RequireAuthTime bool `json:"require_auth_time,omitempty"`
+ DefaultACRValues []string `json:"default_acr_values,omitempty"`
+ InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
+ RequestURIs []string `json:"request_uris,omitempty"`
+}
+
+func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
+ p := stickyErrParser{}
+ m := ClientMetadata{
+ RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
+ ResponseTypes: c.ResponseTypes,
+ GrantTypes: c.GrantTypes,
+ ApplicationType: c.ApplicationType,
+ Contacts: p.parseEmails(c.Contacts, "contacts"),
+ ClientName: c.ClientName,
+ LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
+ ClientURI: p.parseURI(c.ClientURI, "client_uri"),
+ PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
+ TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
+ JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
+ JWKS: c.JWKS,
+ SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
+ SubjectType: c.SubjectType,
+ TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
+ TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
+ DefaultMaxAge: c.DefaultMaxAge,
+ RequireAuthTime: c.RequireAuthTime,
+ DefaultACRValues: c.DefaultACRValues,
+ InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
+ RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
+ IDTokenResponseOptions: JWAOptions{
+ c.IDTokenSignedResponseAlg,
+ c.IDTokenEncryptedResponseAlg,
+ c.IDTokenEncryptedResponseEnc,
+ },
+ UserInfoResponseOptions: JWAOptions{
+ c.UserInfoSignedResponseAlg,
+ c.UserInfoEncryptedResponseAlg,
+ c.UserInfoEncryptedResponseEnc,
+ },
+ RequestObjectOptions: JWAOptions{
+ c.RequestObjectSigningAlg,
+ c.RequestObjectEncryptionAlg,
+ c.RequestObjectEncryptionEnc,
+ },
+ }
+ if p.firstErr != nil {
+ return ClientMetadata{}, p.firstErr
+ }
+ return m, nil
+}
+
+// stickyErrParser parses URIs and email addresses. Once it encounters
+// a parse error, subsequent calls become no-ops.
+type stickyErrParser struct {
+ firstErr error
+}
+
+func (p *stickyErrParser) parseURI(s, field string) *url.URL {
+ if p.firstErr != nil || s == "" {
+ return nil
+ }
+ u, err := url.Parse(s)
+ if err == nil {
+ if u.Host == "" {
+ err = errors.New("no host in URI")
+ } else if u.Scheme != "http" && u.Scheme != "https" {
+ err = errors.New("invalid URI scheme")
+ }
+ }
+ if err != nil {
+ p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
+ return nil
+ }
+ return u
+}
+
+func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
+ if p.firstErr != nil || len(s) == 0 {
+ return nil
+ }
+ uris := make([]url.URL, len(s))
+ for i, val := range s {
+ if val == "" {
+ p.firstErr = fmt.Errorf("invalid URI in field %s", field)
+ return nil
+ }
+ if u := p.parseURI(val, field); u != nil {
+ uris[i] = *u
+ }
+ }
+ return uris
+}
+
+func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
+ if p.firstErr != nil || len(s) == 0 {
+ return nil
+ }
+ addrs := make([]mail.Address, len(s))
+ for i, addr := range s {
+ if addr == "" {
+ p.firstErr = fmt.Errorf("invalid email in field %s", field)
+ return nil
+ }
+ a, err := mail.ParseAddress(addr)
+ if err != nil {
+ p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
+ return nil
+ }
+ addrs[i] = *a
+ }
+ return addrs
+}
+
+func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
+ return encodableClientMetadata{
+ RedirectURIs: urisToStrings(m.RedirectURIs),
+ ResponseTypes: m.ResponseTypes,
+ GrantTypes: m.GrantTypes,
+ ApplicationType: m.ApplicationType,
+ Contacts: emailsToStrings(m.Contacts),
+ ClientName: m.ClientName,
+ LogoURI: uriToString(m.LogoURI),
+ ClientURI: uriToString(m.ClientURI),
+ PolicyURI: uriToString(m.PolicyURI),
+ TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
+ JWKSURI: uriToString(m.JWKSURI),
+ JWKS: m.JWKS,
+ SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
+ SubjectType: m.SubjectType,
+ IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
+ IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
+ IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
+ UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
+ UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
+ UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
+ RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
+ RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
+ RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
+ TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
+ TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
+ DefaultMaxAge: m.DefaultMaxAge,
+ RequireAuthTime: m.RequireAuthTime,
+ DefaultACRValues: m.DefaultACRValues,
+ InitiateLoginURI: uriToString(m.InitiateLoginURI),
+ RequestURIs: urisToStrings(m.RequestURIs),
+ }
+}
+
+func uriToString(u *url.URL) string {
+ if u == nil {
+ return ""
+ }
+ return u.String()
+}
+
+func urisToStrings(urls []url.URL) []string {
+ if len(urls) == 0 {
+ return nil
+ }
+ sli := make([]string, len(urls))
+ for i, u := range urls {
+ sli[i] = u.String()
+ }
+ return sli
+}
+
+func emailsToStrings(addrs []mail.Address) []string {
+ if len(addrs) == 0 {
+ return nil
+ }
+ sli := make([]string, len(addrs))
+ for i, addr := range addrs {
+ sli[i] = addr.String()
+ }
+ return sli
+}
+
+// Valid determines if a ClientMetadata conforms with the OIDC specification.
+//
+// Valid is called by UnmarshalJSON.
+//
+// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
+// URL fields where the OIDC spec requires it. This may change in future releases
+// of this package. See: https://github.com/coreos/go-oidc/issues/34
+func (m *ClientMetadata) Valid() error {
+ if len(m.RedirectURIs) == 0 {
+ return errors.New("zero redirect URLs")
+ }
+
+ validURI := func(u *url.URL, fieldName string) error {
+ if u.Host == "" {
+ return fmt.Errorf("no host for uri field %s", fieldName)
+ }
+ if u.Scheme != "http" && u.Scheme != "https" {
+ return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
+ }
+ return nil
+ }
+
+ uris := []struct {
+ val *url.URL
+ name string
+ }{
+ {m.LogoURI, "logo_uri"},
+ {m.ClientURI, "client_uri"},
+ {m.PolicyURI, "policy_uri"},
+ {m.TermsOfServiceURI, "tos_uri"},
+ {m.JWKSURI, "jwks_uri"},
+ {m.SectorIdentifierURI, "sector_identifier_uri"},
+ {m.InitiateLoginURI, "initiate_login_uri"},
+ }
+
+ for _, uri := range uris {
+ if uri.val == nil {
+ continue
+ }
+ if err := validURI(uri.val, uri.name); err != nil {
+ return err
+ }
+ }
+
+ uriLists := []struct {
+ vals []url.URL
+ name string
+ }{
+ {m.RedirectURIs, "redirect_uris"},
+ {m.RequestURIs, "request_uris"},
+ }
+ for _, list := range uriLists {
+ for _, uri := range list.vals {
+ if err := validURI(&uri, list.name); err != nil {
+ return err
+ }
+ }
+ }
+
+ options := []struct {
+ option JWAOptions
+ name string
+ }{
+ {m.IDTokenResponseOptions, "id_token response"},
+ {m.UserInfoResponseOptions, "userinfo response"},
+ {m.RequestObjectOptions, "request_object"},
+ }
+ for _, option := range options {
+ if err := option.option.valid(); err != nil {
+ return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
+ }
+ }
+ return nil
+}
+
+type ClientRegistrationResponse struct {
+ ClientID string // Required
+ ClientSecret string
+ RegistrationAccessToken string
+ RegistrationClientURI string
+ // If IsZero is true, unspecified.
+ ClientIDIssuedAt time.Time
+ // Time at which the client_secret will expire.
+ // If IsZero is true, it will not expire.
+ ClientSecretExpiresAt time.Time
+
+ ClientMetadata
+}
+
+type encodableClientRegistrationResponse struct {
+ ClientID string `json:"client_id"` // Required
+ ClientSecret string `json:"client_secret,omitempty"`
+ RegistrationAccessToken string `json:"registration_access_token,omitempty"`
+ RegistrationClientURI string `json:"registration_client_uri,omitempty"`
+ ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
+ // Time at which the client_secret will expire, in seconds since the epoch.
+ // If 0 it will not expire.
+ ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
+
+ encodableClientMetadata
+}
+
+func unixToSec(t time.Time) int64 {
+ if t.IsZero() {
+ return 0
+ }
+ return t.Unix()
+}
+
+func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
+ e := encodableClientRegistrationResponse{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RegistrationAccessToken: c.RegistrationAccessToken,
+ RegistrationClientURI: c.RegistrationClientURI,
+ ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
+ ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
+ encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
+ }
+ return json.Marshal(&e)
+}
+
+func secToUnix(sec int64) time.Time {
+ if sec == 0 {
+ return time.Time{}
+ }
+ return time.Unix(sec, 0)
+}
+
+func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
+ var e encodableClientRegistrationResponse
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+ if e.ClientID == "" {
+ return errors.New("no client_id in client registration response")
+ }
+ metadata, err := e.encodableClientMetadata.toStruct()
+ if err != nil {
+ return err
+ }
+ *c = ClientRegistrationResponse{
+ ClientID: e.ClientID,
+ ClientSecret: e.ClientSecret,
+ RegistrationAccessToken: e.RegistrationAccessToken,
+ RegistrationClientURI: e.RegistrationClientURI,
+ ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
+ ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
+ ClientMetadata: metadata,
+ }
+ return nil
+}
+
+type ClientConfig struct {
+ HTTPClient phttp.Client
+ Credentials ClientCredentials
+ Scope []string
+ RedirectURL string
+ ProviderConfig ProviderConfig
+ KeySet key.PublicKeySet
+}
+
+func NewClient(cfg ClientConfig) (*Client, error) {
+ // Allow empty redirect URL in the case where the client
+ // only needs to verify a given token.
+ ru, err := url.Parse(cfg.RedirectURL)
+ if err != nil {
+ return nil, fmt.Errorf("invalid redirect URL: %v", err)
+ }
+
+ c := Client{
+ credentials: cfg.Credentials,
+ httpClient: cfg.HTTPClient,
+ scope: cfg.Scope,
+ redirectURL: ru.String(),
+ providerConfig: newProviderConfigRepo(cfg.ProviderConfig),
+ keySet: cfg.KeySet,
+ }
+
+ if c.httpClient == nil {
+ c.httpClient = http.DefaultClient
+ }
+
+ if c.scope == nil {
+ c.scope = make([]string, len(DefaultScope))
+ copy(c.scope, DefaultScope)
+ }
+
+ return &c, nil
+}
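+// An illustrative wiring sketch; the issuer URL and credentials below are
+// hypothetical placeholders:
+//
+//   client, err := NewClient(ClientConfig{
+//       Credentials: ClientCredentials{ID: "example-id", Secret: "example-secret"},
+//       RedirectURL: "https://app.example.com/callback",
+//   })
+//   if err != nil {
+//       // handle error
+//   }
+//   stop := client.SyncProviderConfig("https://issuer.example.com")
+//   defer close(stop) // one way to halt the background sync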
+
+type Client struct {
+ httpClient phttp.Client
+ providerConfig *providerConfigRepo
+ credentials ClientCredentials
+ redirectURL string
+ scope []string
+ keySet key.PublicKeySet
+ providerSyncer *ProviderConfigSyncer
+
+ keySetSyncMutex sync.RWMutex
+ lastKeySetSync time.Time
+}
+
+func (c *Client) Healthy() error {
+ now := time.Now().UTC()
+
+ cfg := c.providerConfig.Get()
+
+ if cfg.Empty() {
+ return errors.New("oidc client provider config empty")
+ }
+
+ if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) {
+ return errors.New("oidc client provider config expired")
+ }
+
+ return nil
+}
+
+func (c *Client) OAuthClient() (*oauth2.Client, error) {
+ cfg := c.providerConfig.Get()
+ authMethod, err := chooseAuthMethod(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ ocfg := oauth2.Config{
+ Credentials: oauth2.ClientCredentials(c.credentials),
+ RedirectURL: c.redirectURL,
+ AuthURL: cfg.AuthEndpoint.String(),
+ TokenURL: cfg.TokenEndpoint.String(),
+ Scope: c.scope,
+ AuthMethod: authMethod,
+ }
+
+ return oauth2.NewClient(c.httpClient, ocfg)
+}
+
+func chooseAuthMethod(cfg ProviderConfig) (string, error) {
+ if len(cfg.TokenEndpointAuthMethodsSupported) == 0 {
+ return oauth2.AuthMethodClientSecretBasic, nil
+ }
+
+ for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported {
+ if _, ok := supportedAuthMethods[authMethod]; ok {
+ return authMethod, nil
+ }
+ }
+
+ return "", errors.New("no supported auth methods")
+}
+
+// SyncProviderConfig starts the provider config syncer, blocks until the
+// initial sync completes, and returns a channel that stops the syncer when closed.
+func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
+ r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
+ s := NewProviderConfigSyncer(r, c.providerConfig)
+ stop := s.Run()
+ s.WaitUntilInitialSync()
+ return stop
+}
+
+func (c *Client) maybeSyncKeys() error {
+ tooSoon := func() bool {
+ return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow))
+ }
+
+ // ignore request to sync keys if a sync operation has been
+ // attempted too recently
+ if tooSoon() {
+ return nil
+ }
+
+ c.keySetSyncMutex.Lock()
+ defer c.keySetSyncMutex.Unlock()
+
+ // check again, as another goroutine may have been holding
+ // the lock while updating the keys
+ if tooSoon() {
+ return nil
+ }
+
+ cfg := c.providerConfig.Get()
+ r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
+ w := &clientKeyRepo{client: c}
+ _, err := key.Sync(r, w)
+ c.lastKeySetSync = time.Now().UTC()
+
+ return err
+}
+
+type clientKeyRepo struct {
+ client *Client
+}
+
+func (r *clientKeyRepo) Set(ks key.KeySet) error {
+ pks, ok := ks.(*key.PublicKeySet)
+ if !ok {
+ return errors.New("unable to cast to PublicKeySet")
+ }
+ r.client.keySet = *pks
+ return nil
+}
+
+func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) {
+ cfg := c.providerConfig.Get()
+
+ if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) {
+ return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds)
+ }
+
+ oac, err := c.OAuthClient()
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ t, err := oac.ClientCredsToken(scope)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ jwt, err := jose.ParseJWT(t.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ return jwt, c.VerifyJWT(jwt)
+}
+
+// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
+func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
+ oac, err := c.OAuthClient()
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ jwt, err := jose.ParseJWT(t.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ return jwt, c.VerifyJWT(jwt)
+}
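+// An illustrative sketch; the authorization code is a hypothetical placeholder:
+//
+//   jwt, err := client.ExchangeAuthCode("received-auth-code")
+//   if err != nil {
+//       // handle error
+//   }
+//   // jwt has already been verified against the provider's signing keys.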
+
+// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
+func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
+ oac, err := c.OAuthClient()
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ jwt, err := jose.ParseJWT(t.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ return jwt, c.VerifyJWT(jwt)
+}
+
+func (c *Client) VerifyJWT(jwt jose.JWT) error {
+ var keysFunc func() []key.PublicKey
+ if kID, ok := jwt.KeyID(); ok {
+ keysFunc = c.keysFuncWithID(kID)
+ } else {
+ keysFunc = c.keysFuncAll()
+ }
+
+ v := NewJWTVerifier(
+ c.providerConfig.Get().Issuer.String(),
+ c.credentials.ID,
+ c.maybeSyncKeys, keysFunc)
+
+ return v.Verify(jwt)
+}
+
+// keysFuncWithID returns a function that retrieves at most one unexpired
+// public key from the Client that matches the provided ID.
+func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
+ return func() []key.PublicKey {
+ c.keySetSyncMutex.RLock()
+ defer c.keySetSyncMutex.RUnlock()
+
+ if c.keySet.ExpiresAt().Before(time.Now()) {
+ return []key.PublicKey{}
+ }
+
+ k := c.keySet.Key(kID)
+ if k == nil {
+ return []key.PublicKey{}
+ }
+
+ return []key.PublicKey{*k}
+ }
+}
+
+// keysFuncAll returns a function that retrieves all unexpired public
+// keys from the Client
+func (c *Client) keysFuncAll() func() []key.PublicKey {
+ return func() []key.PublicKey {
+ c.keySetSyncMutex.RLock()
+ defer c.keySetSyncMutex.RUnlock()
+
+ if c.keySet.ExpiresAt().Before(time.Now()) {
+ return []key.PublicKey{}
+ }
+
+ return c.keySet.Keys()
+ }
+}
+
+type providerConfigRepo struct {
+ mu sync.RWMutex
+ config ProviderConfig // do not access directly, use Get()
+}
+
+func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
+ return &providerConfigRepo{sync.RWMutex{}, pc}
+}
+
+// Set implements ProviderConfigSetter; the error return value is always nil.
+func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.config = cfg
+ return nil
+}
+
+func (r *providerConfigRepo) Get() ProviderConfig {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.config
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go
new file mode 100644
index 0000000..9bfa8e3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/identity.go
@@ -0,0 +1,44 @@
+package oidc
+
+import (
+ "errors"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+)
+
+type Identity struct {
+ ID string
+ Name string
+ Email string
+ ExpiresAt time.Time
+}
+
+func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
+ if claims == nil {
+ return nil, errors.New("nil claim set")
+ }
+
+ var ident Identity
+ var err error
+ var ok bool
+
+ if ident.ID, ok, err = claims.StringClaim("sub"); err != nil {
+ return nil, err
+ } else if !ok {
+ return nil, errors.New("missing required claim: sub")
+ }
+
+ if ident.Email, _, err = claims.StringClaim("email"); err != nil {
+ return nil, err
+ }
+
+ exp, ok, err := claims.TimeClaim("exp")
+ if err != nil {
+ return nil, err
+ } else if ok {
+ ident.ExpiresAt = exp
+ }
+
+ return &ident, nil
+}
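+// An illustrative sketch, assuming the claim set of an already-verified ID
+// token is at hand:
+//
+//   ident, err := IdentityFromClaims(claims)
+//   if err != nil {
+//       // claims were nil or the required "sub" claim was missing
+//   }
+//   _ = ident.ID // subject; Email and ExpiresAt are filled in when present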
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go
new file mode 100644
index 0000000..248cac0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/interface.go
@@ -0,0 +1,3 @@
+package oidc
+
+type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error)
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go
new file mode 100644
index 0000000..82a0f56
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/key.go
@@ -0,0 +1,67 @@
+package oidc
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "time"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/key"
+)
+
+// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
+// Cache-Control header is provided by the JWK Set document endpoint.
+const DefaultPublicKeySetTTL = 24 * time.Hour
+
+// NewRemotePublicKeyRepo returns a repository responsible for fetching the JWK Set document from the given endpoint.
+func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
+ return &remotePublicKeyRepo{hc: hc, ep: ep}
+}
+
+type remotePublicKeyRepo struct {
+ hc phttp.Client
+ ep string
+}
+
+// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
+// is set on the Key Set to avoid it having to be re-retrieved for every
+// encryption event. This TTL is typically controlled by the endpoint returning
+// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
+// is found.
+func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
+ req, err := http.NewRequest("GET", r.ep, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := r.hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var d struct {
+ Keys []jose.JWK `json:"keys"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
+ return nil, err
+ }
+
+ if len(d.Keys) == 0 {
+ return nil, errors.New("zero keys in response")
+ }
+
+ ttl, ok, err := phttp.Cacheable(resp.Header)
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ ttl = DefaultPublicKeySetTTL
+ }
+
+ exp := time.Now().UTC().Add(ttl)
+ ks := key.NewPublicKeySet(d.Keys, exp)
+ return ks, nil
+}
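+// An illustrative sketch; the JWKS endpoint URL is a hypothetical placeholder:
+//
+//   repo := NewRemotePublicKeyRepo(http.DefaultClient, "https://issuer.example.com/keys")
+//   ks, err := repo.Get() // key.KeySet with a TTL derived from Cache-Control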
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go
new file mode 100644
index 0000000..1235890
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/provider.go
@@ -0,0 +1,688 @@
+package oidc
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ "github.com/coreos/pkg/timeutil"
+ "github.com/jonboulle/clockwork"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/oauth2"
+)
+
+var (
+ log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
+)
+
+const (
+ // Subject Identifier types defined by the OIDC spec. Specifies if the provider
+ // should provide the same sub claim value to all clients (public) or a unique
+ // value for each client (pairwise).
+ //
+ // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
+ SubjectTypePublic = "public"
+ SubjectTypePairwise = "pairwise"
+)
+
+var (
+ // Default values for omitted provider config fields.
+ //
+ // Use ProviderConfig's Defaults method to fill a provider config with these values.
+ DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
+ DefaultResponseModesSupported = []string{"query", "fragment"}
+ DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
+ DefaultClaimTypesSupported = []string{"normal"}
+)
+
+const (
+ MaximumProviderConfigSyncInterval = 24 * time.Hour
+ MinimumProviderConfigSyncInterval = time.Minute
+
+ discoveryConfigPath = "/.well-known/openid-configuration"
+)
+
+// internally configurable for tests
+var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
+
+var (
+ // Ensure ProviderConfig satisfies these interfaces.
+ _ json.Marshaler = &ProviderConfig{}
+ _ json.Unmarshaler = &ProviderConfig{}
+)
+
+// ProviderConfig represents the OpenID Provider Metadata specifying what
+// configurations a provider supports.
+//
+// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+type ProviderConfig struct {
+ Issuer *url.URL // Required
+ AuthEndpoint *url.URL // Required
+ TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
+ UserInfoEndpoint *url.URL
+ KeysEndpoint *url.URL // Required
+ RegistrationEndpoint *url.URL
+
+ // Servers MAY choose not to advertise some supported scope values even when this
+ // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
+ ScopesSupported []string
+ // OAuth2.0 response types supported.
+ ResponseTypesSupported []string // Required
+ // OAuth2.0 response modes supported.
+ //
+ // If omitted, defaults to DefaultResponseModesSupported.
+ ResponseModesSupported []string
+ // OAuth2.0 grant types supported.
+ //
+ // If omitted, defaults to DefaultGrantTypesSupported.
+ GrantTypesSupported []string
+ ACRValuesSupported []string
+ // SubjectTypesSupported specifies strategies for providing values for the sub claim.
+ SubjectTypesSupported []string // Required
+
+ // JWA signing and encryption algorithm values supported for ID tokens.
+ IDTokenSigningAlgValues []string // Required
+ IDTokenEncryptionAlgValues []string
+ IDTokenEncryptionEncValues []string
+
+ // JWA signing and encryption algorithm values supported for user info responses.
+ UserInfoSigningAlgValues []string
+ UserInfoEncryptionAlgValues []string
+ UserInfoEncryptionEncValues []string
+
+ // JWA signing and encryption algorithm values supported for request objects.
+ ReqObjSigningAlgValues []string
+ ReqObjEncryptionAlgValues []string
+ ReqObjEncryptionEncValues []string
+
+ TokenEndpointAuthMethodsSupported []string
+ TokenEndpointAuthSigningAlgValuesSupported []string
+ DisplayValuesSupported []string
+ ClaimTypesSupported []string
+ ClaimsSupported []string
+ ServiceDocs *url.URL
+ ClaimsLocalsSupported []string
+ UILocalsSupported []string
+ ClaimsParameterSupported bool
+ RequestParameterSupported bool
+ RequestURIParamaterSupported bool
+ RequireRequestURIRegistration bool
+
+ Policy *url.URL
+ TermsOfService *url.URL
+
+ // Not part of the OpenID Provider Metadata
+ ExpiresAt time.Time
+}
+
+// Defaults returns a shallow copy of ProviderConfig with default
+// values replacing omitted fields.
+//
+// var cfg oidc.ProviderConfig
+// // Fill provider config with default values for omitted fields.
+// cfg = cfg.Defaults()
+//
+func (p ProviderConfig) Defaults() ProviderConfig {
+ setDefault := func(val *[]string, defaultVal []string) {
+ if len(*val) == 0 {
+ *val = defaultVal
+ }
+ }
+ setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
+ setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
+ setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
+ setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
+ return p
+}
+
+func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
+ e := p.toEncodableStruct()
+ return json.Marshal(&e)
+}
+
+func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
+ var e encodableProviderConfig
+ if err := json.Unmarshal(data, &e); err != nil {
+ return err
+ }
+ conf, err := e.toStruct()
+ if err != nil {
+ return err
+ }
+ if err := conf.Valid(); err != nil {
+ return err
+ }
+ *p = conf
+ return nil
+}
+
+type encodableProviderConfig struct {
+ Issuer string `json:"issuer"`
+ AuthEndpoint string `json:"authorization_endpoint"`
+ TokenEndpoint string `json:"token_endpoint"`
+ UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
+ KeysEndpoint string `json:"jwks_uri"`
+ RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
+
+ // Use 'omitempty' for all slices as per OIDC spec:
+ // "Claims that return multiple values are represented as JSON arrays.
+ // Claims with zero elements MUST be omitted from the response."
+ // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
+
+ ScopesSupported []string `json:"scopes_supported,omitempty"`
+ ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
+ ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
+ GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
+ ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
+ SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
+
+ IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
+ IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
+ IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
+ UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
+ UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
+ UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
+ ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
+ ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
+ ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
+
+ TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
+ TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
+
+ DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
+ ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
+ ClaimsSupported []string `json:"claims_supported,omitempty"`
+ ServiceDocs string `json:"service_documentation,omitempty"`
+ ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
+ UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
+ ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
+ RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
+ RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
+ RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
+
+ Policy string `json:"op_policy_uri,omitempty"`
+ TermsOfService string `json:"op_tos_uri,omitempty"`
+}
+
+func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
+ return encodableProviderConfig{
+ Issuer: uriToString(cfg.Issuer),
+ AuthEndpoint: uriToString(cfg.AuthEndpoint),
+ TokenEndpoint: uriToString(cfg.TokenEndpoint),
+ UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
+ KeysEndpoint: uriToString(cfg.KeysEndpoint),
+ RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
+ ScopesSupported: cfg.ScopesSupported,
+ ResponseTypesSupported: cfg.ResponseTypesSupported,
+ ResponseModesSupported: cfg.ResponseModesSupported,
+ GrantTypesSupported: cfg.GrantTypesSupported,
+ ACRValuesSupported: cfg.ACRValuesSupported,
+ SubjectTypesSupported: cfg.SubjectTypesSupported,
+ IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
+ IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
+ IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
+ UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
+ UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
+ UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
+ ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
+ ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
+ ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
+ TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
+ TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
+ DisplayValuesSupported: cfg.DisplayValuesSupported,
+ ClaimTypesSupported: cfg.ClaimTypesSupported,
+ ClaimsSupported: cfg.ClaimsSupported,
+ ServiceDocs: uriToString(cfg.ServiceDocs),
+ ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
+ UILocalsSupported: cfg.UILocalsSupported,
+ ClaimsParameterSupported: cfg.ClaimsParameterSupported,
+ RequestParameterSupported: cfg.RequestParameterSupported,
+ RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
+ RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
+ Policy: uriToString(cfg.Policy),
+ TermsOfService: uriToString(cfg.TermsOfService),
+ }
+}
+
+func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
+ p := stickyErrParser{}
+ conf := ProviderConfig{
+ Issuer: p.parseURI(e.Issuer, "issuer"),
+ AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
+ TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
+ UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
+ KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
+ RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
+ ScopesSupported: e.ScopesSupported,
+ ResponseTypesSupported: e.ResponseTypesSupported,
+ ResponseModesSupported: e.ResponseModesSupported,
+ GrantTypesSupported: e.GrantTypesSupported,
+ ACRValuesSupported: e.ACRValuesSupported,
+ SubjectTypesSupported: e.SubjectTypesSupported,
+ IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
+ IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
+ IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
+ UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
+ UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
+ UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
+ ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
+ ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
+ ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
+ TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
+ TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
+ DisplayValuesSupported: e.DisplayValuesSupported,
+ ClaimTypesSupported: e.ClaimTypesSupported,
+ ClaimsSupported: e.ClaimsSupported,
+ ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
+ ClaimsLocalsSupported: e.ClaimsLocalsSupported,
+ UILocalsSupported: e.UILocalsSupported,
+ ClaimsParameterSupported: e.ClaimsParameterSupported,
+ RequestParameterSupported: e.RequestParameterSupported,
+ RequestURIParamaterSupported: e.RequestURIParamaterSupported,
+ RequireRequestURIRegistration: e.RequireRequestURIRegistration,
+ Policy: p.parseURI(e.Policy, "op_policy_uri"),
+ TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
+ }
+ if p.firstErr != nil {
+ return ProviderConfig{}, p.firstErr
+ }
+ return conf, nil
+}
+
+// Empty reports whether a ProviderConfig holds no information.
+//
+// This case generally indicates a ProviderConfigGetter has experienced an error
+// and has nothing to report.
+func (p ProviderConfig) Empty() bool {
+ return p.Issuer == nil
+}
+
+func contains(sli []string, ele string) bool {
+ for _, s := range sli {
+ if s == ele {
+ return true
+ }
+ }
+ return false
+}
+
+// Valid determines if a ProviderConfig conforms with the OIDC specification.
+// If Valid returns successfully it guarantees required fields are non-nil and
+// URLs are well formed.
+//
+// Valid is called by UnmarshalJSON.
+//
+// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
+// URL fields where the OIDC spec requires it. This may change in future releases
+// of this package. See: https://github.com/coreos/go-oidc/issues/34
+func (p ProviderConfig) Valid() error {
+ grantTypes := p.GrantTypesSupported
+ if len(grantTypes) == 0 {
+ grantTypes = DefaultGrantTypesSupported
+ }
+ implicitOnly := true
+ for _, grantType := range grantTypes {
+ if grantType != oauth2.GrantTypeImplicit {
+ implicitOnly = false
+ break
+ }
+ }
+
+ if len(p.SubjectTypesSupported) == 0 {
+ return errors.New("missing required field subject_types_supported")
+ }
+ if len(p.IDTokenSigningAlgValues) == 0 {
+ return errors.New("missing required field id_token_signing_alg_values_supported")
+ }
+
+ if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
+ return errors.New("scopes_supported must be unspecified or include 'openid'")
+ }
+
+ if !contains(p.IDTokenSigningAlgValues, "RS256") {
+ return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
+ }
+ if contains(p.TokenEndpointAuthMethodsSupported, "none") {
+ return errors.New("token_endpoint_auth_methods_supported cannot include 'none'")
+ }
+
+ uris := []struct {
+ val *url.URL
+ name string
+ required bool
+ }{
+ {p.Issuer, "issuer", true},
+ {p.AuthEndpoint, "authorization_endpoint", true},
+ {p.TokenEndpoint, "token_endpoint", !implicitOnly},
+ {p.UserInfoEndpoint, "userinfo_endpoint", false},
+ {p.KeysEndpoint, "jwks_uri", true},
+ {p.RegistrationEndpoint, "registration_endpoint", false},
+ {p.ServiceDocs, "service_documentation", false},
+ {p.Policy, "op_policy_uri", false},
+ {p.TermsOfService, "op_tos_uri", false},
+ }
+
+ for _, uri := range uris {
+ if uri.val == nil {
+ if !uri.required {
+ continue
+ }
+ return fmt.Errorf("empty value for required uri field %s", uri.name)
+ }
+ if uri.val.Host == "" {
+ return fmt.Errorf("no host for uri field %s", uri.name)
+ }
+ if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
+ return fmt.Errorf("uri field %s scheme is not http or https", uri.name)
+ }
+ }
+ return nil
+}
+
+// Supports determines if provider supports a client given their respective metadata.
+func (p ProviderConfig) Supports(c ClientMetadata) error {
+ if err := p.Valid(); err != nil {
+ return fmt.Errorf("invalid provider config: %v", err)
+ }
+ if err := c.Valid(); err != nil {
+ return fmt.Errorf("invalid client config: %v", err)
+ }
+
+ // Fill default values for omitted fields
+ c = c.Defaults()
+ p = p.Defaults()
+
+ // Does each list of supported values include the requested value?
+ supports := []struct {
+ supported []string
+ requested string
+ name string
+ }{
+ {p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
+ {p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
+ {p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
+ {p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
+ {p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
+ {p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
+ {p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
+ {p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
+ {p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
+ }
+ for _, field := range supports {
+ if field.requested == "" {
+ continue
+ }
+ if !contains(field.supported, field.requested) {
+ return fmt.Errorf("provider does not support requested value for field %s", field.name)
+ }
+ }
+
+ stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
+
+ // For list-valued fields, is the list of requested values a subset of the supported ones?
+ supportsAll := []struct {
+ supported []string
+ requested []string
+ name string
+ // OAuth2.0 response_type values can be space-separated lists where order doesn't matter.
+ // For example "id_token token" is the same as "token id_token", so a custom
+ // compare method is supported.
+ comp func(s1, s2 string) bool
+ }{
+ {p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
+ {p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
+ }
+ for _, field := range supportsAll {
+ requestLoop:
+ for _, req := range field.requested {
+ for _, sup := range field.supported {
+ if field.comp(req, sup) {
+ continue requestLoop
+ }
+ }
+ return fmt.Errorf("provider does not support requested value for field %s", field.name)
+ }
+ }
+
+ // TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
+
+ return nil
+}
+
+func (p ProviderConfig) SupportsGrantType(grantType string) bool {
+ var supported []string
+ if len(p.GrantTypesSupported) == 0 {
+ supported = DefaultGrantTypesSupported
+ } else {
+ supported = p.GrantTypesSupported
+ }
+
+ for _, t := range supported {
+ if t == grantType {
+ return true
+ }
+ }
+ return false
+}
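+// For example, with GrantTypesSupported omitted the defaults apply:
+//
+//   var cfg ProviderConfig
+//   cfg.SupportsGrantType(oauth2.GrantTypeAuthCode)    // true (default grant type)
+//   cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) // false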
+
+type ProviderConfigGetter interface {
+ Get() (ProviderConfig, error)
+}
+
+type ProviderConfigSetter interface {
+ Set(ProviderConfig) error
+}
+
+type ProviderConfigSyncer struct {
+ from ProviderConfigGetter
+ to ProviderConfigSetter
+ clock clockwork.Clock
+
+ initialSyncDone bool
+ initialSyncWait sync.WaitGroup
+}
+
+func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
+ return &ProviderConfigSyncer{
+ from: from,
+ to: to,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+func (s *ProviderConfigSyncer) Run() chan struct{} {
+ stop := make(chan struct{})
+
+ var next pcsStepper
+ next = &pcsStepNext{aft: time.Duration(0)}
+
+ s.initialSyncWait.Add(1)
+ go func() {
+ for {
+ select {
+ case <-s.clock.After(next.after()):
+ next = next.step(s.sync)
+ case <-stop:
+ return
+ }
+ }
+ }()
+
+ return stop
+}
+
+func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
+ s.initialSyncWait.Wait()
+}
+
+func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
+ cfg, err := s.from.Get()
+ if err != nil {
+ return 0, err
+ }
+
+ if err = s.to.Set(cfg); err != nil {
+ return 0, fmt.Errorf("error setting provider config: %v", err)
+ }
+
+ if !s.initialSyncDone {
+ s.initialSyncWait.Done()
+ s.initialSyncDone = true
+ }
+
+ log.Infof("Updating provider config: config=%#v", cfg)
+
+ return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
+}
+
+type pcsStepFunc func() (time.Duration, error)
+
+type pcsStepper interface {
+ after() time.Duration
+ step(pcsStepFunc) pcsStepper
+}
+
+type pcsStepNext struct {
+ aft time.Duration
+}
+
+func (n *pcsStepNext) after() time.Duration {
+ return n.aft
+}
+
+func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
+ ttl, err := fn()
+ if err == nil {
+ next = &pcsStepNext{aft: ttl}
+ log.Debugf("Synced provider config, next attempt in %v", next.after())
+ } else {
+ next = &pcsStepRetry{aft: time.Second}
+ log.Errorf("Provider config sync failed, retrying in %v: %v", next.after(), err)
+ }
+ return
+}
+
+type pcsStepRetry struct {
+ aft time.Duration
+}
+
+func (r *pcsStepRetry) after() time.Duration {
+ return r.aft
+}
+
+func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
+ ttl, err := fn()
+ if err == nil {
+ next = &pcsStepNext{aft: ttl}
+ log.Infof("Provider config sync no longer failing")
+ } else {
+ next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
+ log.Errorf("Provider config sync still failing, retrying in %v: %v", next.after(), err)
+ }
+ return
+}
+
+func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration {
+ if exp.IsZero() {
+ return MaximumProviderConfigSyncInterval
+ }
+
+ t := exp.Sub(clock.Now()) / 2
+ if t > MaximumProviderConfigSyncInterval {
+ t = MaximumProviderConfigSyncInterval
+ } else if t < minimumProviderConfigSyncInterval {
+ t = minimumProviderConfigSyncInterval
+ }
+
+ return t
+}
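+// For example, a config expiring 10 minutes from now is re-synced in 5 minutes;
+// expirations more than 48 hours away are clamped to the 24 hour maximum, and
+// anything under 2 minutes away is clamped to the 1 minute minimum.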
+
+type httpProviderConfigGetter struct {
+ hc phttp.Client
+ issuerURL string
+ clock clockwork.Clock
+}
+
+func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
+ return &httpProviderConfigGetter{
+ hc: hc,
+ issuerURL: issuerURL,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
+ // If the Issuer value contains a path component, any terminating / MUST be removed before
+ // appending /.well-known/openid-configuration.
+ // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
+ discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
+ req, err := http.NewRequest("GET", discoveryURL, nil)
+ if err != nil {
+ return
+ }
+
+ resp, err := r.hc.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
+ return
+ }
+
+ var ttl time.Duration
+ var ok bool
+ ttl, ok, err = phttp.Cacheable(resp.Header)
+ if err != nil {
+ return
+ } else if ok {
+ cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl)
+ }
+
+ // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
+ // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
+ if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
+ err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
+ return
+ }
+
+ return
+}
+
+func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) {
+ if hc == nil {
+ hc = http.DefaultClient
+ }
+
+ g := NewHTTPProviderConfigGetter(hc, issuerURL)
+ return g.Get()
+}
+
+func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) {
+ return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock())
+}
+
+func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) {
+ var sleep time.Duration
+ var err error
+ for {
+ pcfg, err = FetchProviderConfig(hc, issuerURL)
+ if err == nil {
+ break
+ }
+
+ sleep = timeutil.ExpBackoff(sleep, time.Minute)
+ fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err)
+ time.Sleep(sleep)
+ }
+
+ return
+}
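Usage note (editorial, not part of the vendored diff): the syncer pairs a getter with a setter and re-fetches the provider config on a schedule derived from the response's cache headers. A minimal sketch, assuming the vendored package is importable as `github.com/coreos/go-oidc/oidc` and using a hypothetical in-memory setter:

```go
package main

import (
	"net/http"
	"sync"

	"github.com/coreos/go-oidc/oidc"
)

// memSetter is a hypothetical ProviderConfigSetter that just caches the last config.
type memSetter struct {
	mu  sync.Mutex
	cfg oidc.ProviderConfig
}

func (m *memSetter) Set(cfg oidc.ProviderConfig) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.cfg = cfg
	return nil
}

func main() {
	getter := oidc.NewHTTPProviderConfigGetter(http.DefaultClient, "https://issuer.example.com")
	syncer := oidc.NewProviderConfigSyncer(getter, &memSetter{})

	stop := syncer.Run()          // starts the background resync goroutine
	syncer.WaitUntilInitialSync() // blocks until the first successful fetch

	// ... use the cached provider config ...

	close(stop) // closing the returned channel ends the loop started by Run
}
```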
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go
new file mode 100644
index 0000000..61c926d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/transport.go
@@ -0,0 +1,88 @@
+package oidc
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ phttp "github.com/coreos/go-oidc/http"
+ "github.com/coreos/go-oidc/jose"
+)
+
+type TokenRefresher interface {
+ // Verify checks if the provided token is currently valid or not.
+ Verify(jose.JWT) error
+
+ // Refresh attempts to authenticate and retrieve a new token.
+ Refresh() (jose.JWT, error)
+}
+
+type ClientCredsTokenRefresher struct {
+ Issuer string
+ OIDCClient *Client
+}
+
+func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) {
+ _, err = VerifyClientClaims(jwt, c.Issuer)
+ return
+}
+
+func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) {
+ if err = c.OIDCClient.Healthy(); err != nil {
+ err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err)
+ return
+ }
+
+ jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"})
+ if err != nil {
+ err = fmt.Errorf("unable to verify auth code with issuer: %v", err)
+ return
+ }
+
+ return
+}
+
+type AuthenticatedTransport struct {
+ TokenRefresher
+ http.RoundTripper
+
+ mu sync.Mutex
+ jwt jose.JWT
+}
+
+func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.TokenRefresher.Verify(t.jwt) == nil {
+ return t.jwt, nil
+ }
+
+ jwt, err := t.TokenRefresher.Refresh()
+ if err != nil {
+ return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err)
+ }
+
+ t.jwt = jwt
+ return t.jwt, nil
+}
+
+// SetJWT sets the JWT held by the Transport.
+// This is useful for cases in which you want to set an initial JWT.
+func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.jwt = jwt
+}
+
+func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ jwt, err := t.verifiedJWT()
+ if err != nil {
+ return nil, err
+ }
+
+ req := phttp.CopyRequest(r)
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode()))
+ return t.RoundTripper.RoundTrip(req)
+}
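Usage note (editorial): `AuthenticatedTransport` composes a `TokenRefresher` with a `RoundTripper` so every outgoing request carries a currently valid bearer token. A hedged sketch of wiring it into an `http.Client`; the `*oidc.Client` value and the issuer URL are placeholders assumed to be configured elsewhere:

```go
package main

import (
	"net/http"

	"github.com/coreos/go-oidc/oidc"
)

// newAuthedHTTPClient is a sketch: oc must be a configured *oidc.Client able to
// issue client-credentials tokens; the issuer URL below is a placeholder.
func newAuthedHTTPClient(oc *oidc.Client) *http.Client {
	refresher := &oidc.ClientCredsTokenRefresher{
		Issuer:     "https://issuer.example.com",
		OIDCClient: oc,
	}
	return &http.Client{
		Transport: &oidc.AuthenticatedTransport{
			TokenRefresher: refresher,
			RoundTripper:   http.DefaultTransport,
		},
	}
}
```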
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go
new file mode 100644
index 0000000..f2a5a19
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/util.go
@@ -0,0 +1,109 @@
+package oidc
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+)
+
+// RequestTokenExtractor funcs extract a raw encoded token from a request.
+type RequestTokenExtractor func(r *http.Request) (string, error)
+
+// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's
+// Authorization header.
+func ExtractBearerToken(r *http.Request) (string, error) {
+ ah := r.Header.Get("Authorization")
+ if ah == "" {
+ return "", errors.New("missing Authorization header")
+ }
+
+ if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" {
+ return "", errors.New("should be a bearer token")
+ }
+
+ val := ah[7:]
+ if len(val) == 0 {
+ return "", errors.New("bearer token is empty")
+ }
+
+ return val, nil
+}
+
+// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request.
+func CookieTokenExtractor(cookieName string) RequestTokenExtractor {
+ return func(r *http.Request) (string, error) {
+ ck, err := r.Cookie(cookieName)
+ if err != nil {
+ return "", fmt.Errorf("token cookie not found in request: %v", err)
+ }
+
+ if ck.Value == "" {
+ return "", errors.New("token cookie found but is empty")
+ }
+
+ return ck.Value, nil
+ }
+}
+
+func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims {
+ return jose.Claims{
+ // required
+ "iss": iss,
+ "sub": sub,
+ "aud": aud,
+ "iat": iat.Unix(),
+ "exp": exp.Unix(),
+ }
+}
+
+func GenClientID(hostport string) (string, error) {
+ b, err := randBytes(32)
+ if err != nil {
+ return "", err
+ }
+
+ var host string
+ if strings.Contains(hostport, ":") {
+ host, _, err = net.SplitHostPort(hostport)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ host = hostport
+ }
+
+ return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil
+}
+
+func randBytes(n int) ([]byte, error) {
+ b := make([]byte, n)
+ got, err := rand.Read(b)
+ if err != nil {
+ return nil, err
+ } else if n != got {
+ return nil, errors.New("unable to generate enough random data")
+ }
+ return b, nil
+}
+
+// urlEqual checks two urls for equality using only the host and path portions.
+func urlEqual(url1, url2 string) bool {
+ u1, err := url.Parse(url1)
+ if err != nil {
+ return false
+ }
+ u2, err := url.Parse(url2)
+ if err != nil {
+ return false
+ }
+
+ return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path)
+}
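Usage note (editorial): the two extractors compose naturally in HTTP middleware, trying the Authorization header first and falling back to a cookie. A sketch; the cookie name and the downstream verification step are illustrative only:

```go
package main

import (
	"net/http"

	"github.com/coreos/go-oidc/oidc"
)

// requireToken pulls a raw token from either the Authorization header or a
// "session_token" cookie (name chosen purely for illustration).
func requireToken(next http.Handler) http.Handler {
	fromCookie := oidc.CookieTokenExtractor("session_token")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		raw, err := oidc.ExtractBearerToken(r)
		if err != nil {
			raw, err = fromCookie(r)
		}
		if err != nil {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		_ = raw // hand the raw token to a JWT verifier before calling next
		next.ServeHTTP(w, r)
	})
}
```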
diff --git a/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go
new file mode 100644
index 0000000..0024130
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-oidc/oidc/verification.go
@@ -0,0 +1,188 @@
+package oidc
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/key"
+)
+
+func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) {
+ jwtBytes := []byte(jwt.Data())
+ for _, k := range keys {
+ v, err := k.Verifier()
+ if err != nil {
+ return false, err
+ }
+ if v.Verify(jwt.Signature, jwtBytes) == nil {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// containsString returns true if the given string (needle) is found
+// in the string slice (haystack).
+func containsString(needle string, haystack []string) bool {
+ for _, v := range haystack {
+ if v == needle {
+ return true
+ }
+ }
+ return false
+}
+
+// Verify claims in accordance with OIDC spec
+// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation
+func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {
+ now := time.Now().UTC()
+
+ claims, err := jwt.Claims()
+ if err != nil {
+ return err
+ }
+
+ ident, err := IdentityFromClaims(claims)
+ if err != nil {
+ return err
+ }
+
+ if ident.ExpiresAt.Before(now) {
+ return errors.New("token is expired")
+ }
+
+ // iss REQUIRED. Issuer Identifier for the Issuer of the response.
+ // The iss value is a case sensitive URL using the https scheme that contains scheme,
+ // host, and optionally, port number and path components and no query or fragment components.
+ if iss, exists := claims["iss"].(string); exists {
+ if !urlEqual(iss, issuer) {
+ return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss)
+ }
+ } else {
+ return errors.New("missing claim: 'iss'")
+ }
+
+ // iat REQUIRED. Time at which the JWT was issued.
+ // Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
+ // as measured in UTC until the date/time.
+ if _, exists := claims["iat"].(float64); !exists {
+ return errors.New("missing claim: 'iat'")
+ }
+
+ // aud REQUIRED. Audience(s) that this ID Token is intended for.
+ // It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
+ // It MAY also contain identifiers for other audiences. In the general case, the aud
+ // value is an array of case sensitive strings. In the common special case when there
+ // is one audience, the aud value MAY be a single case sensitive string.
+ if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
+ if aud != clientID {
+ return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
+ }
+ } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
+ if !containsString(clientID, aud) {
+ return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
+ }
+ } else {
+ return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
+ }
+
+ return nil
+}
+
+// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
+// Returns the client ID if valid, or an error if invalid.
+func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
+ claims, err := jwt.Claims()
+ if err != nil {
+ return "", fmt.Errorf("failed to parse JWT claims: %v", err)
+ }
+
+ iss, ok, err := claims.StringClaim("iss")
+ if err != nil {
+ return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
+ } else if !ok {
+ return "", errors.New("missing required 'iss' claim")
+ } else if !urlEqual(iss, issuer) {
+ return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
+ }
+
+ sub, ok, err := claims.StringClaim("sub")
+ if err != nil {
+ return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
+ } else if !ok {
+ return "", errors.New("missing required 'sub' claim")
+ }
+
+ if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
+ if aud != sub {
+ return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
+ }
+ } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
+ if !containsString(sub, aud) {
+ return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub)
+ }
+ } else {
+ return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
+ }
+
+ now := time.Now().UTC()
+ exp, ok, err := claims.TimeClaim("exp")
+ if err != nil {
+ return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
+ } else if !ok {
+ return "", errors.New("missing required 'exp' claim")
+ } else if exp.Before(now) {
+ return "", fmt.Errorf("token already expired at: %v", exp)
+ }
+
+ return sub, nil
+}
+
+type JWTVerifier struct {
+ issuer string
+ clientID string
+ syncFunc func() error
+ keysFunc func() []key.PublicKey
+ clock clockwork.Clock
+}
+
+func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
+ return JWTVerifier{
+ issuer: issuer,
+ clientID: clientID,
+ syncFunc: syncFunc,
+ keysFunc: keysFunc,
+ clock: clockwork.NewRealClock(),
+ }
+}
+
+func (v *JWTVerifier) Verify(jwt jose.JWT) error {
+ ok, err := VerifySignature(jwt, v.keysFunc())
+ if err != nil {
+ return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
+ }
+
+ if !ok {
+ // The cached keys may be stale; sync the KeySet once and retry the signature check.
+ if err = v.syncFunc(); err != nil {
+ return fmt.Errorf("oidc: failed syncing KeySet: %v", err)
+ }
+
+ ok, err = VerifySignature(jwt, v.keysFunc())
+ if err != nil {
+ return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
+ } else if !ok {
+ return errors.New("oidc: unable to verify JWT signature: no matching keys")
+ }
+ }
+
+ if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil {
+ return fmt.Errorf("oidc: JWT claims invalid: %v", err)
+ }
+
+ return nil
+}
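Usage note (editorial): a caller builds a `JWTVerifier` from the issuer, the expected client ID, a key-sync callback, and a key source, then verifies parsed tokens. The sketch below assumes `jose.ParseJWT` from the vendored jose package and a `cachedKeys` slice maintained elsewhere; issuer and client ID are placeholders:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/key"
	"github.com/coreos/go-oidc/oidc"
)

// cachedKeys stands in for a key cache kept in sync with the provider's JWKs.
var cachedKeys []key.PublicKey

func verifyRawToken(rawToken string) error {
	verifier := oidc.NewJWTVerifier(
		"https://issuer.example.com", // issuer (placeholder)
		"my-client-id",               // expected audience / client_id (placeholder)
		func() error { return nil },  // syncFunc: refresh cachedKeys here
		func() []key.PublicKey { return cachedKeys },
	)

	jwt, err := jose.ParseJWT(rawToken)
	if err != nil {
		return fmt.Errorf("malformed token: %v", err)
	}
	return verifier.Verify(jwt)
}
```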
diff --git a/src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE b/src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go b/src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000..7f43499
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,179 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+ PriEmerg Priority = iota
+ PriAlert
+ PriCrit
+ PriErr
+ PriWarning
+ PriNotice
+ PriInfo
+ PriDebug
+)
+
+var conn net.Conn
+
+func init() {
+ var err error
+ conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
+ if err != nil {
+ conn = nil
+ }
+}
+
+// Enabled returns true if the local systemd journal is available for logging
+func Enabled() bool {
+ return conn != nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values. Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+ if conn == nil {
+ return journalError("could not connect to journald socket")
+ }
+
+ data := new(bytes.Buffer)
+ appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+ appendVariable(data, "MESSAGE", message)
+ for k, v := range vars {
+ appendVariable(data, k, v)
+ }
+
+ _, err := io.Copy(conn, data)
+ if err != nil && isSocketSpaceError(err) {
+ file, err := tempFd()
+ if err != nil {
+ return journalError(err.Error())
+ }
+ defer file.Close()
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return journalError(err.Error())
+ }
+
+ rights := syscall.UnixRights(int(file.Fd()))
+
+ /* this connection should always be a UnixConn, but better safe than sorry */
+ unixConn, ok := conn.(*net.UnixConn)
+ if !ok {
+ return journalError("can't send file through non-Unix connection")
+ }
+ unixConn.WriteMsgUnix([]byte{}, rights, nil)
+ } else if err != nil {
+ return journalError(err.Error())
+ }
+ return nil
+}
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+ return Send(fmt.Sprintf(format, a...), priority, nil)
+}
+
+func appendVariable(w io.Writer, name, value string) {
+ if !validVarName(name) {
+ journalError("variable name contains invalid character, ignoring")
+ }
+ if strings.ContainsRune(value, '\n') {
+ /* When the value contains a newline, we write:
+ * - the variable name, followed by a newline
+ * - the size (in 64bit little endian format)
+ * - the data, followed by a newline
+ */
+ fmt.Fprintln(w, name)
+ binary.Write(w, binary.LittleEndian, uint64(len(value)))
+ fmt.Fprintln(w, value)
+ } else {
+ /* just write the variable and value all on one line */
+ fmt.Fprintf(w, "%s=%s\n", name, value)
+ }
+}
+
+func validVarName(name string) bool {
+ /* The variable name must be in uppercase and consist only of characters,
+ * numbers and underscores, and may not begin with an underscore. (from the docs)
+ */
+
+ if name == "" {
+ return false
+ }
+ valid := name[0] != '_'
+ for _, c := range name {
+ // Group the character checks so an earlier invalid character cannot be
+ // overridden by a later digit or underscore (&& binds tighter than ||).
+ valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+ }
+ return valid
+}
+
+func isSocketSpaceError(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if !ok {
+ return false
+ }
+
+ sysErr, ok := opErr.Err.(syscall.Errno)
+ if !ok {
+ return false
+ }
+
+ return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+ file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+ if err != nil {
+ return nil, err
+ }
+ err = syscall.Unlink(file.Name())
+ if err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+func journalError(s string) error {
+ s = "journal error: " + s
+ fmt.Fprintln(os.Stderr, s)
+ return errors.New(s)
+}
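Usage note (editorial): for callers, the journal API reduces to an `Enabled` check plus `Send`/`Print`. A small sketch; the custom field name is illustrative, and journald field names must be upper-case without a leading underscore:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/journal"
)

func logRebuild(entries int) {
	if !journal.Enabled() {
		return // no journald socket; fall back to another logger here
	}
	// Structured entry with one custom field (name chosen for illustration).
	_ = journal.Send("cache rebuilt", journal.PriInfo, map[string]string{
		"CACHE_ENTRIES": fmt.Sprintf("%d", entries),
	})
	_ = journal.Print(journal.PriWarning, "rebuild took longer than expected (%d entries)", entries)
}
```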
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/LICENSE b/src/kube2msb/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/NOTICE b/src/kube2msb/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000..81efb1f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there: some come with awkward licenses, some pile on features (colorization, hooks into all sorts of log frameworks), and some are simply a pain to use (no `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations; `main` may delegate this to, say, a configuration webhook, but it does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do fancier tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, along with a larger volume of less individually useful notices.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
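Usage note (editorial): in practice these principles translate to one package-level logger per library and a `main` that chooses the formatter and level. A sketch; the repository and package names are placeholders, and `PackageLogger` methods such as `Infof` come from parts of this package not shown in this excerpt:

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One logger per package, registered under its repository and package name.
// A library would declare this but never configure formatters or levels itself.
var plog = capnslog.NewPackageLogger("github.com/example/myproj", "main")

func main() {
	// main turns logging on: pick the output format and the global level.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("started with %d workers", 4)
}
```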
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 0000000..b305a84
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,157 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "runtime"
+ "strings"
+ "time"
+)
+
+type Formatter interface {
+ Format(pkg string, level LogLevel, depth int, entries ...interface{})
+ Flush()
+}
+
+func NewStringFormatter(w io.Writer) Formatter {
+ return &StringFormatter{
+ w: bufio.NewWriter(w),
+ }
+}
+
+type StringFormatter struct {
+ w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+ now := time.Now().UTC()
+ s.w.WriteString(now.Format(time.RFC3339))
+ s.w.WriteByte(' ')
+ writeEntries(s.w, pkg, l, i, entries...)
+ s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+ if pkg != "" {
+ w.WriteString(pkg + ": ")
+ }
+ str := fmt.Sprint(entries...)
+ endsInNL := strings.HasSuffix(str, "\n")
+ w.WriteString(str)
+ if !endsInNL {
+ w.WriteString("\n")
+ }
+}
+
+func (s *StringFormatter) Flush() {
+ s.w.Flush()
+}
+
+func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
+ return &PrettyFormatter{
+ w: bufio.NewWriter(w),
+ debug: debug,
+ }
+}
+
+type PrettyFormatter struct {
+ w *bufio.Writer
+ debug bool
+}
+
+func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
+ now := time.Now()
+ ts := now.Format("2006-01-02 15:04:05")
+ c.w.WriteString(ts)
+ ms := now.Nanosecond() / 1000
+ c.w.WriteString(fmt.Sprintf(".%06d", ms))
+ if c.debug {
+ _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
+ }
+ c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
+ writeEntries(c.w, pkg, l, depth, entries...)
+ c.Flush()
+}
+
+func (c *PrettyFormatter) Flush() {
+ c.w.Flush()
+}
+
+// LogFormatter emulates the form of the traditional built-in logger.
+type LogFormatter struct {
+ logger *log.Logger
+ prefix string
+}
+
+// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
+// golang log package to actually do the logging work so that logs look similar.
+func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
+ return &LogFormatter{
+ logger: log.New(w, "", flag), // don't use prefix here
+ prefix: prefix, // save it instead
+ }
+}
+
+// Format builds a log message for the LogFormatter. The LogLevel is ignored.
+func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
+ str := fmt.Sprint(entries...)
+ prefix := lf.prefix
+ if pkg != "" {
+ prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
+ }
+ lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (lf *LogFormatter) Flush() {
+ // noop
+}
+
+// NilFormatter is a no-op log formatter that does nothing.
+type NilFormatter struct {
+}
+
+// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
+// messages so that you can cause part of your logging to be silent.
+func NewNilFormatter() Formatter {
+ return &NilFormatter{}
+}
+
+// Format does nothing.
+func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
+ // noop
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (_ *NilFormatter) Flush() {
+ // noop
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 0000000..426603e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+ StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+ g := &GlogFormatter{}
+ g.w = bufio.NewWriter(w)
+ return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+ g.w.Write(GlogHeader(level, depth+1))
+ g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ now := time.Now().UTC()
+ _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ buf := &bytes.Buffer{}
+ buf.Grow(30)
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ buf.WriteString(level.Char())
+ twoDigits(buf, int(month))
+ twoDigits(buf, day)
+ buf.WriteByte(' ')
+ twoDigits(buf, hour)
+ buf.WriteByte(':')
+ twoDigits(buf, minute)
+ buf.WriteByte(':')
+ twoDigits(buf, second)
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+ buf.WriteByte('Z')
+ buf.WriteByte(' ')
+ buf.WriteString(strconv.Itoa(pid))
+ buf.WriteByte(' ')
+ buf.WriteString(file)
+ buf.WriteByte(':')
+ buf.WriteString(strconv.Itoa(line))
+ buf.WriteByte(']')
+ buf.WriteByte(' ')
+ return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+ c2 := digits[d%10]
+ d /= 10
+ c1 := digits[d%10]
+ b.WriteByte(c1)
+ b.WriteByte(c2)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000..44b8cd3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically;
+// otherwise it's a bug. The way to take that control is to create your own
+// init_log.go file, much like this one.
+
+func init() {
+ initHijack()
+
+ // Go `log` package uses os.Stderr.
+ SetFormatter(NewDefaultFormatter(os.Stderr))
+ SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+ if syscall.Getppid() == 1 {
+ // We're running under init, which may be systemd.
+ f, err := NewJournaldFormatter()
+ if err == nil {
+ return f
+ }
+ }
+ return NewPrettyFormatter(out, false)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000..4553050
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+ initHijack()
+
+ // Go `log` package uses os.Stderr.
+ SetFormatter(NewPrettyFormatter(os.Stderr, false))
+ SetGlobalLogLevel(INFO)
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
new file mode 100644
index 0000000..72e0520
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/go-systemd/journal"
+)
+
+func NewJournaldFormatter() (Formatter, error) {
+ if !journal.Enabled() {
+ return nil, errors.New("No systemd detected")
+ }
+ return &journaldFormatter{}, nil
+}
+
+type journaldFormatter struct{}
+
+func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+ var pri journal.Priority
+ switch l {
+ case CRITICAL:
+ pri = journal.PriCrit
+ case ERROR:
+ pri = journal.PriErr
+ case WARNING:
+ pri = journal.PriWarning
+ case NOTICE:
+ pri = journal.PriNotice
+ case INFO:
+ pri = journal.PriInfo
+ case DEBUG:
+ pri = journal.PriDebug
+ case TRACE:
+ pri = journal.PriDebug
+ default:
+ panic("Unhandled loglevel")
+ }
+ msg := fmt.Sprint(entries...)
+ tags := map[string]string{
+ "PACKAGE": pkg,
+ "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+ }
+ err := journal.Send(msg, pri, tags)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+}
+
+func (j *journaldFormatter) Flush() {}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
new file mode 100644
index 0000000..970086b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
@@ -0,0 +1,39 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "log"
+)
+
+func initHijack() {
+ pkg := NewPackageLogger("log", "")
+ w := packageWriter{pkg}
+ log.SetFlags(0)
+ log.SetPrefix("")
+ log.SetOutput(w)
+}
+
+type packageWriter struct {
+ pl *PackageLogger
+}
+
+func (p packageWriter) Write(b []byte) (int, error) {
+ if p.pl.level < INFO {
+ return 0, nil
+ }
+ p.pl.internalLog(calldepth+2, INFO, string(b))
+ return len(b), nil
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go
new file mode 100644
index 0000000..8495448
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/logmap.go
@@ -0,0 +1,240 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "errors"
+ "strings"
+ "sync"
+)
+
+// LogLevel is the set of all log levels.
+type LogLevel int8
+
+const (
+ // CRITICAL is the lowest log level; only errors which will end the program will be propagated.
+ CRITICAL LogLevel = iota - 1
+ // ERROR is for errors that are not fatal but lead to troubling behavior.
+ ERROR
+	// WARNING is for conditions which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
+ WARNING
+ // NOTICE is for normal but significant conditions.
+ NOTICE
+ // INFO is a log level for common, everyday log updates.
+ INFO
+ // DEBUG is the default hidden level for more verbose updates about internal processes.
+ DEBUG
+ // TRACE is for (potentially) call by call tracing of programs.
+ TRACE
+)
+
+// Char returns a single-character representation of the log level.
+func (l LogLevel) Char() string {
+ switch l {
+ case CRITICAL:
+ return "C"
+ case ERROR:
+ return "E"
+ case WARNING:
+ return "W"
+ case NOTICE:
+ return "N"
+ case INFO:
+ return "I"
+ case DEBUG:
+ return "D"
+ case TRACE:
+ return "T"
+ default:
+ panic("Unhandled loglevel")
+ }
+}
+
+// String returns a multi-character representation of the log level.
+func (l LogLevel) String() string {
+ switch l {
+ case CRITICAL:
+ return "CRITICAL"
+ case ERROR:
+ return "ERROR"
+ case WARNING:
+ return "WARNING"
+ case NOTICE:
+ return "NOTICE"
+ case INFO:
+ return "INFO"
+ case DEBUG:
+ return "DEBUG"
+ case TRACE:
+ return "TRACE"
+ default:
+ panic("Unhandled loglevel")
+ }
+}
+
+// Set updates the level using the given string value. It fulfills the flag.Value interface.
+func (l *LogLevel) Set(s string) error {
+ value, err := ParseLevel(s)
+ if err != nil {
+ return err
+ }
+
+ *l = value
+ return nil
+}
+
+// ParseLevel translates some potential loglevel strings into their corresponding levels.
+func ParseLevel(s string) (LogLevel, error) {
+ switch s {
+ case "CRITICAL", "C":
+ return CRITICAL, nil
+ case "ERROR", "0", "E":
+ return ERROR, nil
+ case "WARNING", "1", "W":
+ return WARNING, nil
+ case "NOTICE", "2", "N":
+ return NOTICE, nil
+ case "INFO", "3", "I":
+ return INFO, nil
+ case "DEBUG", "4", "D":
+ return DEBUG, nil
+ case "TRACE", "5", "T":
+ return TRACE, nil
+ }
+ return CRITICAL, errors.New("couldn't parse log level " + s)
+}
+
+type RepoLogger map[string]*PackageLogger
+
+type loggerStruct struct {
+ sync.Mutex
+ repoMap map[string]RepoLogger
+ formatter Formatter
+}
+
+// logger is the global logger
+var logger = new(loggerStruct)
+
+// SetGlobalLogLevel sets the log level for all packages in all repositories
+// registered with capnslog.
+func SetGlobalLogLevel(l LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ for _, r := range logger.repoMap {
+ r.setRepoLogLevelInternal(l)
+ }
+}
+
+// GetRepoLogger returns the handle to the repository's set of packages' loggers, or an error if none are registered.
+func GetRepoLogger(repo string) (RepoLogger, error) {
+ logger.Lock()
+ defer logger.Unlock()
+ r, ok := logger.repoMap[repo]
+ if !ok {
+ return nil, errors.New("no packages registered for repo " + repo)
+ }
+ return r, nil
+}
+
+// MustRepoLogger returns the handle to the repository's packages' loggers.
+func MustRepoLogger(repo string) RepoLogger {
+ r, err := GetRepoLogger(repo)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// SetRepoLogLevel sets the log level for all packages in the repository.
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ r.setRepoLogLevelInternal(l)
+}
+
+func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
+ for _, v := range r {
+ v.level = l
+ }
+}
+
+// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
+// order, and returns a map of the results, for use in SetLogLevel.
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
+ setlist := strings.Split(conf, ",")
+ out := make(map[string]LogLevel)
+ for _, setstring := range setlist {
+ setting := strings.Split(setstring, "=")
+ if len(setting) != 2 {
+ return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
+ }
+ l, err := ParseLevel(setting[1])
+ if err != nil {
+ return nil, err
+ }
+ out[setting[0]] = l
+ }
+ return out, nil
+}
+
+// SetLogLevel takes a map of package names within a repository to their desired
+// loglevel, and sets the levels appropriately. Unknown packages are ignored.
+// "*" is a special package name that corresponds to all packages, and will be
+// processed first.
+func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
+ logger.Lock()
+ defer logger.Unlock()
+ if l, ok := m["*"]; ok {
+ r.setRepoLogLevelInternal(l)
+ }
+ for k, v := range m {
+ l, ok := r[k]
+ if !ok {
+ continue
+ }
+ l.level = v
+ }
+}
+
+// SetFormatter sets the formatting function for all logs.
+func SetFormatter(f Formatter) {
+ logger.Lock()
+ defer logger.Unlock()
+ logger.formatter = f
+}
+
+// NewPackageLogger creates a package logger object.
+// This should be defined as a global var in your package, referencing your repo.
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
+ logger.Lock()
+ defer logger.Unlock()
+ if logger.repoMap == nil {
+ logger.repoMap = make(map[string]RepoLogger)
+ }
+ r, rok := logger.repoMap[repo]
+ if !rok {
+ logger.repoMap[repo] = make(RepoLogger)
+ r = logger.repoMap[repo]
+ }
+ p, pok := r[pkg]
+ if !pok {
+ r[pkg] = &PackageLogger{
+ pkg: pkg,
+ level: INFO,
+ }
+ p = r[pkg]
+ }
+ return
+}
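
The level-map plumbing above can be exercised end to end: parse a comma-separated "pkg=level" string (for example from a command-line flag) and apply it to every logger registered under a repository. This is a sketch; the repo/package names and the config string are assumptions, and NewStringFormatter is assumed to be vendored from the same upstream package alongside the files shown here.

    package main

    import (
        "os"

        "github.com/coreos/pkg/capnslog"
    )

    // Registering a package logger creates the repo entry that
    // MustRepoLogger later looks up; the names are placeholders.
    var plog = capnslog.NewPackageLogger("kube2msb", "msb_client")

    func main() {
        // NewStringFormatter lives in the same upstream package and is
        // assumed to be vendored with the rest of capnslog.
        capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr))

        repoLog := capnslog.MustRepoLogger("kube2msb")

        // "*" is applied first and sets every package; explicit entries
        // such as msb_client=DEBUG then override it.
        cfg, err := repoLog.ParseLogLevelConfig("*=INFO,msb_client=DEBUG")
        if err != nil {
            panic(err)
        }
        repoLog.SetLogLevel(cfg)

        plog.Debug("visible because this package was raised to DEBUG")
    }
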
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
new file mode 100644
index 0000000..e2c4668
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
@@ -0,0 +1,171 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+ "fmt"
+ "os"
+)
+
+type PackageLogger struct {
+ pkg string
+ level LogLevel
+}
+
+const calldepth = 2
+
+func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
+ logger.Lock()
+ defer logger.Unlock()
+ if inLevel != CRITICAL && p.level < inLevel {
+ return
+ }
+ if logger.formatter != nil {
+ logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
+ }
+}
+
+func (p *PackageLogger) LevelAt(l LogLevel) bool {
+ logger.Lock()
+ defer logger.Unlock()
+ return p.level >= l
+}
+
+// Log a formatted string at any level between ERROR and TRACE
+func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
+ p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
+}
+
+// Log a message at any level between ERROR and TRACE
+func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
+ p.internalLog(calldepth, l, fmt.Sprint(args...))
+}
+
+// log stdlib compatibility
+
+func (p *PackageLogger) Println(args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
+}
+
+func (p *PackageLogger) Printf(format string, args ...interface{}) {
+ p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Print(args ...interface{}) {
+ p.internalLog(calldepth, INFO, fmt.Sprint(args...))
+}
+
+// Panic and fatal
+
+func (p *PackageLogger) Panicf(format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ panic(s)
+}
+
+func (p *PackageLogger) Panic(args ...interface{}) {
+ s := fmt.Sprint(args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ panic(s)
+}
+
+func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
+ p.Logf(CRITICAL, format, args...)
+ os.Exit(1)
+}
+
+func (p *PackageLogger) Fatal(args ...interface{}) {
+ s := fmt.Sprint(args...)
+ p.internalLog(calldepth, CRITICAL, s)
+ os.Exit(1)
+}
+
+// Error Functions
+
+func (p *PackageLogger) Errorf(format string, args ...interface{}) {
+ p.Logf(ERROR, format, args...)
+}
+
+func (p *PackageLogger) Error(entries ...interface{}) {
+ p.internalLog(calldepth, ERROR, entries...)
+}
+
+// Warning Functions
+
+func (p *PackageLogger) Warningf(format string, args ...interface{}) {
+ p.Logf(WARNING, format, args...)
+}
+
+func (p *PackageLogger) Warning(entries ...interface{}) {
+ p.internalLog(calldepth, WARNING, entries...)
+}
+
+// Notice Functions
+
+func (p *PackageLogger) Noticef(format string, args ...interface{}) {
+ p.Logf(NOTICE, format, args...)
+}
+
+func (p *PackageLogger) Notice(entries ...interface{}) {
+ p.internalLog(calldepth, NOTICE, entries...)
+}
+
+// Info Functions
+
+func (p *PackageLogger) Infof(format string, args ...interface{}) {
+ p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Info(entries ...interface{}) {
+ p.internalLog(calldepth, INFO, entries...)
+}
+
+// Debug Functions
+
+func (p *PackageLogger) Debugf(format string, args ...interface{}) {
+ if p.level < DEBUG {
+ return
+ }
+ p.Logf(DEBUG, format, args...)
+}
+
+func (p *PackageLogger) Debug(entries ...interface{}) {
+ if p.level < DEBUG {
+ return
+ }
+ p.internalLog(calldepth, DEBUG, entries...)
+}
+
+// Trace Functions
+
+func (p *PackageLogger) Tracef(format string, args ...interface{}) {
+ if p.level < TRACE {
+ return
+ }
+ p.Logf(TRACE, format, args...)
+}
+
+func (p *PackageLogger) Trace(entries ...interface{}) {
+ if p.level < TRACE {
+ return
+ }
+ p.internalLog(calldepth, TRACE, entries...)
+}
+
+func (p *PackageLogger) Flush() {
+ logger.Lock()
+ defer logger.Unlock()
+ logger.formatter.Flush()
+}
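
PackageLogger deliberately mirrors the standard library's log surface (Print/Printf/Println plus Fatal and Panic variants) while adding leveled calls. A small sketch of the level gating; the names and levels are illustrative, and NewStringFormatter is again assumed to be vendored with the package.

    package main

    import (
        "os"

        "github.com/coreos/pkg/capnslog"
    )

    var plog = capnslog.NewPackageLogger("kube2msb", "kube_work") // placeholder names

    func main() {
        // NewStringFormatter is assumed to be vendored with the package.
        capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr))
        capnslog.SetGlobalLogLevel(capnslog.INFO)

        plog.Infof("watching %d services", 3) // emitted: INFO is within the package level
        plog.Debug("suppressed: package level is INFO")

        // LevelAt lets callers skip building expensive messages entirely.
        if plog.LevelAt(capnslog.DEBUG) {
            plog.Debug("expensive diagnostics")
        }
    }
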
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
new file mode 100644
index 0000000..4be5a1f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+ "fmt"
+ "log/syslog"
+)
+
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+ return &syslogFormatter{w}
+}
+
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+ w, err := syslog.New(syslog.LOG_DEBUG, tag)
+ if err != nil {
+ return nil, err
+ }
+ return NewSyslogFormatter(w), nil
+}
+
+type syslogFormatter struct {
+ w *syslog.Writer
+}
+
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+ for _, entry := range entries {
+ str := fmt.Sprint(entry)
+ switch l {
+ case CRITICAL:
+ s.w.Crit(str)
+ case ERROR:
+ s.w.Err(str)
+ case WARNING:
+ s.w.Warning(str)
+ case NOTICE:
+ s.w.Notice(str)
+ case INFO:
+ s.w.Info(str)
+ case DEBUG:
+ s.w.Debug(str)
+ case TRACE:
+ s.w.Debug(str)
+ default:
+ panic("Unhandled loglevel")
+ }
+ }
+}
+
+func (s *syslogFormatter) Flush() {
+}
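
For hosts without journald, the syslog formatter above provides the same hook. A sketch; the tag string is an assumption (the binary's base name, mirroring what the journald formatter records as SYSLOG_IDENTIFIER), and error handling is kept deliberately plain.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "github.com/coreos/pkg/capnslog"
    )

    var plog = capnslog.NewPackageLogger("kube2msb", "main") // placeholder names

    func main() {
        // The tag becomes the syslog program name; using the binary's base
        // name is an assumption, not something the diff prescribes.
        f, err := capnslog.NewDefaultSyslogFormatter(filepath.Base(os.Args[0]))
        if err != nil {
            fmt.Fprintln(os.Stderr, "syslog unavailable:", err)
            os.Exit(1)
        }
        capnslog.SetFormatter(f)
        plog.Notice("now logging to syslog")
    }
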
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/health/README.md b/src/kube2msb/vendor/github.com/coreos/pkg/health/README.md
new file mode 100644
index 0000000..5ec34c2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/health/README.md
@@ -0,0 +1,11 @@
+health
+====
+
+A simple framework for implementing an HTTP health check endpoint on servers.
+
+Users implement their `health.Checkable` types and create a `health.Checker`, which satisfies `http.Handler` (via `ServeHTTP`) and can be mounted directly to serve the health endpoint.
+
+### Documentation
+
+For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)
+
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/health/health.go b/src/kube2msb/vendor/github.com/coreos/pkg/health/health.go
new file mode 100644
index 0000000..a1c3610
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/health/health.go
@@ -0,0 +1,127 @@
+package health
+
+import (
+ "expvar"
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/coreos/pkg/httputil"
+)
+
+// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
+type Checkable interface {
+ Healthy() error
+}
+
+// Checker provides a way to make an endpoint which can be probed for system health.
+type Checker struct {
+ // Checks are the Checkables to be checked when probing.
+ Checks []Checkable
+
+	// UnhealthyHandler is called when one or more of the checks are unhealthy.
+ // If not provided DefaultUnhealthyHandler is called.
+ UnhealthyHandler UnhealthyHandler
+
+ // HealthyHandler is called when all checks are healthy.
+ // If not provided, DefaultHealthyHandler is called.
+ HealthyHandler http.HandlerFunc
+}
+
+func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ unhealthyHandler := c.UnhealthyHandler
+ if unhealthyHandler == nil {
+ unhealthyHandler = DefaultUnhealthyHandler
+ }
+
+ successHandler := c.HealthyHandler
+ if successHandler == nil {
+ successHandler = DefaultHealthyHandler
+ }
+
+ if r.Method != "GET" {
+ w.Header().Set("Allow", "GET")
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ if err := Check(c.Checks); err != nil {
+ unhealthyHandler(w, r, err)
+ return
+ }
+
+ successHandler(w, r)
+}
+
+type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
+
+type StatusResponse struct {
+ Status string `json:"status"`
+ Details *StatusResponseDetails `json:"details,omitempty"`
+}
+
+type StatusResponseDetails struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func Check(checks []Checkable) (err error) {
+ errs := []error{}
+ for _, c := range checks {
+ if e := c.Healthy(); e != nil {
+ errs = append(errs, e)
+ }
+ }
+
+ switch len(errs) {
+ case 0:
+ err = nil
+ case 1:
+ err = errs[0]
+ default:
+ err = fmt.Errorf("multiple health check failure: %v", errs)
+ }
+
+ return
+}
+
+func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
+ err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
+ Status: "ok",
+ })
+ if err != nil {
+ // TODO(bobbyrullo): replace with logging from new logging pkg,
+ // once it lands.
+ log.Printf("Failed to write JSON response: %v", err)
+ }
+}
+
+func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
+ writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
+ Status: "error",
+ Details: &StatusResponseDetails{
+ Code: http.StatusInternalServerError,
+ Message: err.Error(),
+ },
+ })
+ if writeErr != nil {
+ // TODO(bobbyrullo): replace with logging from new logging pkg,
+ // once it lands.
+		log.Printf("Failed to write JSON response: %v", writeErr)
+ }
+}
+
+// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
+func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintf(w, "{\n")
+ first := true
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
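
Putting the pieces above together: a Checkable probes one dependency, and a Checker, which satisfies http.Handler through its ServeHTTP method, is mounted on a route. The route, port, and the dbCheck type are illustrative assumptions.

    package main

    import (
        "errors"
        "net/http"

        "github.com/coreos/pkg/health"
    )

    // dbCheck is a hypothetical Checkable; a real check would probe an
    // actual dependency such as a database or an upstream API.
    type dbCheck struct{ ok bool }

    func (c dbCheck) Healthy() error {
        if !c.ok {
            return errors.New("database unreachable")
        }
        return nil
    }

    func main() {
        checker := health.Checker{Checks: []health.Checkable{dbCheck{ok: true}}}

        // GET on the route returns {"status":"ok"} while every check passes,
        // and a 500 with error details otherwise.
        http.Handle("/healthz", checker)
        http.ListenAndServe(":8080", nil)
    }
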
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md
new file mode 100644
index 0000000..44fa751
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/README.md
@@ -0,0 +1,13 @@
+httputil
+====
+
+Common code for dealing with HTTP.
+
+Includes:
+
+* Code for returning JSON responses.
+
+### Documentation
+
+Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil)
+
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go
new file mode 100644
index 0000000..c37a37b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/cookie.go
@@ -0,0 +1,21 @@
+package httputil
+
+import (
+ "net/http"
+ "time"
+)
+
+// DeleteCookies effectively deletes all named cookies
+// by wiping all data and setting to expire immediately.
+func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
+ for _, n := range cookieNames {
+ c := &http.Cookie{
+ Name: n,
+ Value: "",
+ Path: "/",
+ MaxAge: -1,
+ Expires: time.Time{},
+ }
+ http.SetCookie(w, c)
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go
new file mode 100644
index 0000000..0b09235
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/httputil/json.go
@@ -0,0 +1,27 @@
+package httputil
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+const (
+ JSONContentType = "application/json"
+)
+
+func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
+ enc, err := json.Marshal(resp)
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ return err
+ }
+
+ w.Header().Set("Content-Type", JSONContentType)
+ w.WriteHeader(code)
+
+ _, err = w.Write(enc)
+ if err != nil {
+ return err
+ }
+ return nil
+}
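
WriteJSONResponse handles marshalling, the Content-Type header, and the status code in one call; callers only need to decide what to do when the write itself fails. A minimal sketch, with the route and payload chosen purely for illustration.

    package main

    import (
        "log"
        "net/http"

        "github.com/coreos/pkg/httputil"
    )

    func main() {
        http.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
            payload := map[string]string{"version": "v1", "status": "ok"}
            if err := httputil.WriteJSONResponse(w, http.StatusOK, payload); err != nil {
                // A marshal failure already produced a 500; the error can
                // only be logged at this point.
                log.Printf("write /version response: %v", err)
            }
        })
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
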
diff --git a/src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go b/src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go
new file mode 100644
index 0000000..b34fb49
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/coreos/pkg/timeutil/backoff.go
@@ -0,0 +1,15 @@
+package timeutil
+
+import (
+ "time"
+)
+
+func ExpBackoff(prev, max time.Duration) time.Duration {
+ if prev == 0 {
+ return time.Second
+ }
+ if prev > max/2 {
+ return max
+ }
+ return 2 * prev
+}
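
ExpBackoff produces the familiar doubling delay: 1s after the first failure, then 2s, 4s, and so on, saturating at the supplied maximum once the previous delay exceeds half of it. A small sketch of a retry loop driven by it; the attempt count and the 30-second cap are arbitrary choices.

    package main

    import (
        "fmt"
        "time"

        "github.com/coreos/pkg/timeutil"
    )

    func main() {
        var delay time.Duration
        for attempt := 1; attempt <= 8; attempt++ {
            // Yields 1s, 2s, 4s, 8s, 16s, then 30s once prev > max/2.
            delay = timeutil.ExpBackoff(delay, 30*time.Second)
            fmt.Printf("attempt %d failed, next retry in %v\n", attempt, delay)
        }
    }
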
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE b/src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..2a7cfd2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..565bf58
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,151 @@
+// Copyright (c) 2015 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine and "-tags disableunsafe"
+// is not added to the go build command line.
+// +build !appengine,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+ // commit ecccf07e7f9d which changed the format. The are also valid
+	// commit ecccf07e7f9d which changed the format. They are also valid
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = uintptr(ptrSize)
+ offsetScalar = uintptr(0)
+ offsetFlag = uintptr(ptrSize * 2)
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = uintptr(flagKindWidth - 1)
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+ flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+ if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specifies whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of either of the
+ // original flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..457e412
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2015 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when either the code is running on Google App Engine or "-tags disableunsafe"
+// is added to the go build command line.
+// +build appengine disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..14f02dc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+	strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..ee1ab07
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "disableunsafe" build tag specified.
+ DisablePointerMethods bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
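
A local ConfigState is the way to get deterministic output without touching the global spew.Config. A brief sketch; the service type and field values are purely illustrative.

    package main

    import "github.com/davecgh/go-spew/spew"

    type service struct {
        Name  string
        Ports map[string]int
    }

    func main() {
        // Tab indentation plus sorted map keys gives stable, diffable dumps
        // regardless of how the global spew.Config is set.
        cfg := spew.ConfigState{Indent: "\t", SortKeys: true}
        cfg.Dump(service{Name: "msb", Ports: map[string]int{"http": 80, "https": 443}})
    }
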
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..5be0c40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely, in the style of the
+hexdump -C command, as shown below.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..a0ff95e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound == true:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function; however, circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..ecf3b80
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be passed directly as an argument
+// to standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound == true:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function;
+// however, circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
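+
+If direct use is desired, the returned Formatter can simply be passed to the
+standard fmt package functions, as in the following sketch (reusing myVar1 from
+the examples above):
+ fmt.Printf("%v\n", spew.NewFormatter(myVar1))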
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..d8233f5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/LICENSE b/src/kube2msb/vendor/github.com/docker/distribution/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go
new file mode 100644
index 0000000..31d821b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/digest.go
@@ -0,0 +1,139 @@
+package digest
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "regexp"
+ "strings"
+)
+
+const (
+ // DigestSha256EmptyTar is the canonical sha256 digest of empty data
+ DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows the digest to be abstracted behind this type so that code can
+// work purely in those terms.
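+//
+// As a minimal usage sketch built only from the functions declared in this
+// file (shown from a caller's point of view), a digest string can be parsed,
+// validated, and split into its parts:
+//
+//  dgst, err := digest.ParseDigest("sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
+//  if err != nil {
+//      // err is one of the ErrDigest* values defined below
+//  }
+//  alg := dgst.Algorithm() // "sha256"
+//  hex := dgst.Hex()       // "7173b809..."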
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+ return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+ return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex-encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+ return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+ // ErrDigestInvalidFormat is returned when the digest format is invalid.
+ ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+ // ErrDigestInvalidLength is returned when the digest has an invalid length.
+ ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+ // ErrDigestUnsupported is returned when the digest algorithm is unsupported.
+ ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+ d := Digest(s)
+
+ return d, d.Validate()
+}
+
+// FromReader returns the digest of the underlying content, computed using
+// the canonical digest algorithm.
+func FromReader(rd io.Reader) (Digest, error) {
+ return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+ return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d form a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+ s := string(d)
+
+ if !DigestRegexpAnchored.MatchString(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ i := strings.Index(s, ":")
+ if i < 0 {
+ return ErrDigestInvalidFormat
+ }
+
+ // case: "sha256:" with no hex.
+ if i+1 == len(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ switch algorithm := Algorithm(s[:i]); algorithm {
+ case SHA256, SHA384, SHA512:
+ if algorithm.Size()*2 != len(s[i+1:]) {
+ return ErrDigestInvalidLength
+ }
+ break
+ default:
+ return ErrDigestUnsupported
+ }
+
+ return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+ return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+ return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+ return string(d)
+}
+
+func (d Digest) sepIndex() int {
+ i := strings.Index(string(d), ":")
+
+ if i < 0 {
+ panic("could not find ':' in digest: " + d)
+ }
+
+ return i
+}
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go
new file mode 100644
index 0000000..f3105a4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/digester.go
@@ -0,0 +1,155 @@
+package digest
+
+import (
+ "crypto"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+ SHA256 Algorithm = "sha256" // sha256 with hex encoding
+ SHA384 Algorithm = "sha384" // sha384 with hex encoding
+ SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+ // Canonical is the primary digest algorithm used with the distribution
+ // project. Other digests may be used but this one is the primary storage
+ // digest.
+ Canonical = SHA256
+)
+
+var (
+ // TODO(stevvooe): Follow the pattern of the standard crypto package for
+ // registration of digests. Effectively, we are a registerable set and
+ // common symbol access.
+
+ // algorithms maps values to hash.Hash implementations. Other algorithms
+ // may be available but they cannot be calculated by the digest package.
+ algorithms = map[Algorithm]crypto.Hash{
+ SHA256: crypto.SHA256,
+ SHA384: crypto.SHA384,
+ SHA512: crypto.SHA512,
+ }
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, calling New or Hash will panic.
+func (a Algorithm) Available() bool {
+ h, ok := algorithms[a]
+ if !ok {
+ return false
+ }
+
+ // check availability of the hash, as well
+ return h.Available()
+}
+
+func (a Algorithm) String() string {
+ return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+ h, ok := algorithms[a]
+ if !ok {
+ return 0
+ }
+ return h.Size()
+}
+
+// Set is implemented to allow the use of Algorithm as a command-line flag.
+func (a *Algorithm) Set(value string) error {
+ if value == "" {
+ *a = Canonical
+ } else {
+ // just do a type conversion, support is queried with Available.
+ *a = Algorithm(value)
+ }
+
+ return nil
+}
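+
+// As a small caller-side sketch (the flag name below is chosen purely for
+// illustration): because *Algorithm provides both Set and String, it
+// satisfies flag.Value and can be registered as a command line flag.
+//
+//  alg := digest.Canonical
+//  flag.Var(&alg, "digest-algorithm", "digest algorithm to use")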
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, the call will panic; check
+// availability with Available before calling New.
+func (a Algorithm) New() Digester {
+ return &digester{
+ alg: a,
+ hash: a.Hash(),
+ }
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+ if !a.Available() {
+ // NOTE(stevvooe): A missing hash is usually a programming error that
+ // must be resolved at compile time. We don't import in the digest
+ // package to allow users to choose their hash implementation (such as
+ // when using stevvooe/resumable or a hardware accelerated package).
+ //
+ // Applications that may want to resolve the hash at runtime should
+ // call Algorithm.Available before calling Algorithm.Hash().
+ panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+ }
+
+ return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+ digester := a.New()
+
+ if _, err := io.Copy(digester.Hash(), rd); err != nil {
+ return "", err
+ }
+
+ return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+ digester := a.New()
+
+ if _, err := digester.Hash().Write(p); err != nil {
+ // Writes to a Hash should never fail. None of the existing
+ // hash implementations in the stdlib or hashes vendored
+ // here can return errors from Write. Having a panic in this
+ // condition instead of having FromBytes return an error value
+ // avoids unnecessary error handling paths in all callers.
+ panic("write to hash function returned error: " + err.Error())
+ }
+
+ return digester.Digest()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+ Hash() hash.Hash // provides direct access to underlying hash instance.
+ Digest() Digest
+}
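+
+// A minimal incremental-use sketch (mirroring what Algorithm.FromReader does
+// above, with rd standing in for any io.Reader): stream the data into the
+// Digester's hash, then read off the resulting digest.
+//
+//  d := digest.Canonical.New()
+//  if _, err := io.Copy(d.Hash(), rd); err != nil {
+//      // handle the read error
+//  }
+//  dgst := d.Digest()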
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+ alg Algorithm
+ hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+ return d.hash
+}
+
+func (d *digester) Digest() Digest {
+ return NewDigest(d.alg, d.hash)
+}
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go
new file mode 100644
index 0000000..f64b0db
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/doc.go
@@ -0,0 +1,42 @@
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+// <algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
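+//
+// A hedged sketch of that flow (the constructor name below is assumed, since
+// the Verifier implementation lives elsewhere in the package):
+//
+//  verifier, _ := digest.NewDigestVerifier(dgst) // assumed constructor
+//  io.Copy(verifier, rd)                         // write the content to verify
+//  if !verifier.Verified() {
+//      // content does not match dgst
+//  }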
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
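
A short sketch of the basics described in the package comment above, assuming the ParseDigest helper defined elsewhere in this package; the digest string is the example value from the comment.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	s := "sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"

	d, err := digest.ParseDigest(s)
	if err != nil {
		panic(err)
	}

	// A Digest is just a string, so comparisons use plain equality.
	fmt.Println(d.Algorithm())         // sha256
	fmt.Println(d.Hex()[:12])          // first 12 hex characters
	fmt.Println(d == digest.Digest(s)) // true
}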
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/set.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/set.go
new file mode 100644
index 0000000..4b9313c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/set.go
@@ -0,0 +1,245 @@
+package digest
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string representation of
+// the digest as well as by a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected; therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+ mutex sync.RWMutex
+ entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+ return &Set{
+ entries: digestEntries{},
+ }
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// but rather whether the second value could match against the
+// first value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+ if len(hex) == len(shortHex) {
+ if hex != shortHex {
+ return false
+ }
+ if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ } else if !strings.HasPrefix(hex, shortHex) {
+ return false
+ } else if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found, ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found,
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (Digest, error) {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ if len(dst.entries) == 0 {
+ return "", ErrDigestNotFound
+ }
+ var (
+ searchFunc func(int) bool
+ alg Algorithm
+ hex string
+ )
+ dgst, err := ParseDigest(d)
+ if err == ErrDigestInvalidFormat {
+ hex = d
+ searchFunc = func(i int) bool {
+ return dst.entries[i].val >= d
+ }
+ } else {
+ hex = dgst.Hex()
+ alg = dgst.Algorithm()
+ searchFunc = func(i int) bool {
+ if dst.entries[i].val == hex {
+ return dst.entries[i].alg >= alg
+ }
+ return dst.entries[i].val >= hex
+ }
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+ return "", ErrDigestNotFound
+ }
+ if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+ return dst.entries[idx].digest, nil
+ }
+ if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+ return "", ErrDigestAmbiguous
+ }
+
+ return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// set, this operation will be a no-op.
+func (dst *Set) Add(d Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) {
+ dst.entries = append(dst.entries, entry)
+ return nil
+ } else if dst.entries[idx].digest == d {
+ return nil
+ }
+
+ entries := append(dst.entries, nil)
+ copy(entries[idx+1:], entries[idx:len(entries)-1])
+ entries[idx] = entry
+ dst.entries = entries
+ return nil
+}
+
+// Remove removes the given digest from the set. An error will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ // Not found if idx is after or value at idx is not digest
+ if idx == len(dst.entries) || dst.entries[idx].digest != d {
+ return nil
+ }
+
+ entries := dst.entries
+ copy(entries[idx:], entries[idx+1:])
+ entries = entries[:len(entries)-1]
+ dst.entries = entries
+
+ return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []Digest {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ retValues := make([]Digest, len(dst.entries))
+ for i := range dst.entries {
+ retValues[i] = dst.entries[i].digest
+ }
+
+ return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length parameter is the minimum length; the maximum length may be the
+// entire digest value if uniqueness cannot be achieved without the
+// full value. This function will attempt to make short codes as short
+// as possible while remaining unique.
+func ShortCodeTable(dst *Set, length int) map[Digest]string {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ m := make(map[Digest]string, len(dst.entries))
+ l := length
+ resetIdx := 0
+ for i := 0; i < len(dst.entries); i++ {
+ var short string
+ extended := true
+ for extended {
+ extended = false
+ if len(dst.entries[i].val) <= l {
+ short = dst.entries[i].digest.String()
+ } else {
+ short = dst.entries[i].val[:l]
+ for j := i + 1; j < len(dst.entries); j++ {
+ if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+ if j > resetIdx {
+ resetIdx = j
+ }
+ extended = true
+ } else {
+ break
+ }
+ }
+ if extended {
+ l++
+ }
+ }
+ }
+ m[dst.entries[i].digest] = short
+ if i >= resetIdx {
+ l = length
+ }
+ }
+ return m
+}
+
+type digestEntry struct {
+ alg Algorithm
+ val string
+ digest Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+ return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+ if d[i].val != d[j].val {
+ return d[i].val < d[j].val
+ }
+ return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+ d[i], d[j] = d[j], d[i]
+}
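
An illustrative sketch of the Set API above, using hypothetical payloads; it assumes the SHA256 Algorithm constant from the same package. Lookup resolves either full digest strings or unambiguous short prefixes, and ShortCodeTable derives the shortest unique prefix per entry.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	set := digest.NewSet()

	d1 := digest.SHA256.FromBytes([]byte("first"))
	d2 := digest.SHA256.FromBytes([]byte("second"))
	for _, d := range []digest.Digest{d1, d2} {
		if err := set.Add(d); err != nil {
			panic(err)
		}
	}

	// Lookup accepts a full digest string or an unambiguous short prefix.
	if found, err := set.Lookup(d1.Hex()[:12]); err == nil {
		fmt.Println("resolved:", found == d1) // true
	}

	// ShortCodeTable yields the shortest unique prefix (at least 6
	// characters here) for every digest currently in the set.
	for dgst, short := range digest.ShortCodeTable(set, 6) {
		fmt.Println(short, "->", dgst.Algorithm())
	}
}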
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go b/src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go
new file mode 100644
index 0000000..9af3be1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/digest/verifiers.go
@@ -0,0 +1,44 @@
+package digest
+
+import (
+ "hash"
+ "io"
+)
+
+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it, then check
+// the result with the Verified method.
+type Verifier interface {
+ io.Writer
+
+ // Verified will return true if the content written to Verifier matches
+ // the digest.
+ Verified() bool
+}
+
+// NewDigestVerifier returns a verifier that compares the written bytes
+// against a passed in digest.
+func NewDigestVerifier(d Digest) (Verifier, error) {
+ if err := d.Validate(); err != nil {
+ return nil, err
+ }
+
+ return hashVerifier{
+ hash: d.Algorithm().Hash(),
+ digest: d,
+ }, nil
+}
+
+type hashVerifier struct {
+ digest Digest
+ hash hash.Hash
+}
+
+func (hv hashVerifier) Write(p []byte) (n int, err error) {
+ return hv.hash.Write(p)
+}
+
+func (hv hashVerifier) Verified() bool {
+ return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
+}
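
A minimal sketch of digest verification with the Verifier above, assuming the SHA256 Algorithm constant from this package and a hypothetical payload.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("some blob contents")
	expected := digest.SHA256.FromBytes(payload)

	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		panic(err)
	}

	// Stream the bytes under test into the verifier, then check the result.
	if _, err := verifier.Write(payload); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified()) // true
}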
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go b/src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 0000000..bb09fa2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,334 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [hostname '/'] component ['/' component]*
+// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
+// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+package reference
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/docker/distribution/digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// in which it can be referenced by
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including a hostname, and a digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+func SplitHostname(named Named) (string, string) {
+ name := named.Name()
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ // TODO(dmcgowan): Provide more specific and helpful error
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ ref := reference{
+ name: matches[1],
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.ParseDigest(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name, otherwise an error is
+// returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ ref, err := Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+ if !anchoredNameRegexp.MatchString(name) {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository(name), nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ return taggedReference{
+ name: name.Name(),
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ return canonicalReference{
+ name: name.Name(),
+ digest: digest,
+ }, nil
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.name == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ name: ref.name,
+ digest: ref.digest,
+ }
+ }
+ return repository(ref.name)
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ name: ref.name,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ name string
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.name + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Name() string {
+ return r.name
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository string
+
+func (r repository) String() string {
+ return string(r)
+}
+
+func (r repository) Name() string {
+ return string(r)
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return string(d)
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ name string
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.name + ":" + t.tag
+}
+
+func (t taggedReference) Name() string {
+ return t.name
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ name string
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.name + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Name() string {
+ return c.name
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
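
An illustrative sketch of the reference package's parsing and construction helpers, using a hypothetical repository name and tag. ParseNamed yields a Named reference whose hostname and remainder can be split, and WithName/WithTag build a reference programmatically.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNamed("registry.example.com:5000/team/app:1.2.3")
	if err != nil {
		panic(err)
	}

	hostname, remainder := reference.SplitHostname(named)
	fmt.Println(hostname, remainder) // registry.example.com:5000 team/app

	if tagged, ok := named.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // 1.2.3
	}

	// Building a reference from a bare name and a tag.
	repo, _ := reference.WithName("library/ubuntu")
	withTag, _ := reference.WithTag(repo, "16.04")
	fmt.Println(withTag.String()) // library/ubuntu:16.04
}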
diff --git a/src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go b/src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 0000000..9a7d366
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,124 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscores, and multiple
+ // dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+ // separated by one period, one or two underscores, and multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // hostnameComponentRegexp restricts the registry hostname component of a
+ // repository name to start with a component as defined by hostnameRegexp
+ // and followed by an optional port.
+ hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // hostnameRegexp defines the structure of potential hostname components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ hostnameRegexp = expression(
+ hostnameComponentRegexp,
+ optional(repeated(literal(`.`), hostnameComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the hostname and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = expression(
+ optional(hostnameRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // hostname and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(hostnameRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
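
A small sketch of how the exported ReferenceRegexp above can be applied directly; the reference string is hypothetical. The anchored expression captures the name, tag, and digest components in that order.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref := "registry.example.com/team/app:2.0@sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"

	// ReferenceRegexp is anchored and captures name, tag, and digest.
	m := reference.ReferenceRegexp.FindStringSubmatch(ref)
	if m == nil {
		panic("not a valid reference")
	}
	fmt.Println("name:  ", m[1])
	fmt.Println("tag:   ", m[2])
	fmt.Println("digest:", m[3])
}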
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md b/src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 0000000..9ea86d7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.code
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs
new file mode 100644
index 0000000..e26cd4f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+
+ including for purposes of Section 3(b); and
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS b/src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 0000000..477be8b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,27 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "calavera",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+ [people.calavera]
+ Name = "David Calavera"
+ Email = "david.calavera@gmail.com"
+ GitHub = "calavera"
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/README.md b/src/kube2msb/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 0000000..3ce4d79
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,18 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human-friendly measurements into machine-friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
+is released under the Apache 2.0 license. The README.md file, and files in the
+"docs" folder are licensed under the Creative Commons Attribution 4.0
+International License under the terms and conditions set forth in the file
+"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
+CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/circle.yml b/src/kube2msb/vendor/github.com/docker/go-units/circle.yml
new file mode 100644
index 0000000..9043b35
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get github.com/golang/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/duration.go b/src/kube2msb/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 0000000..c219a8a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper functions to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (e.g. "About a minute", "4 hours", "2 weeks").
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
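HumanDuration buckets a time.Duration into coarse, human-oriented strings using the branch ladder above. A minimal usage sketch, assuming the upstream import path github.com/docker/go-units (which this tree vendors); the sample durations are illustrative:

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(90 * time.Second))     // "About a minute"
	fmt.Println(units.HumanDuration(3 * time.Hour))        // "3 hours"
	fmt.Println(units.HumanDuration(14 * 24 * time.Hour))  // "2 weeks"
}
```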
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/size.go b/src/kube2msb/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 0000000..3b59daf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 significant digits (e.g. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (e.g. "44KiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using the SI standard (e.g. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
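size.go pairs two formatters (decimal HumanSize, binary BytesSize) with two parsers that share parseSize but use different unit maps. A small sketch under the same import-path assumption; the sample values are illustrative:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Formatting: decimal (base 1000) vs binary (base 1024) abbreviations.
	fmt.Println(units.HumanSize(2746000))         // "2.746 MB"
	fmt.Println(units.BytesSize(2 * 1024 * 1024)) // "2 MiB"

	// Parsing: FromHumanSize uses the decimal map, RAMInBytes the binary map.
	n, _ := units.FromHumanSize("44kB") // 44000
	m, _ := units.RAMInBytes("64m")     // 67108864 (64 * 1024 * 1024)
	fmt.Println(n, m)
}
```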
diff --git a/src/kube2msb/vendor/github.com/docker/go-units/ulimit.go b/src/kube2msb/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000..5ac7fd8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // Magic numbers for making the syscall.
+ // Some of these are defined in the syscall package, but not all.
+ // Also, since the Windows client doesn't get access to the syscall package,
+ // they need to be defined here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the Rlimit corresponding to the Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
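ParseUlimit accepts the docker-style `name=soft[:hard]` syntax (the hard limit defaults to the soft value when omitted), and GetRlimit maps the name to the numeric rlimit type above. A brief sketch; the import path is assumed as before and the chosen limits are arbitrary:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	u, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // "nofile=1024:2048"

	r, err := u.GetRlimit()
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Type, r.Soft, r.Hard) // 7 1024 2048
}
```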
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md
new file mode 100644
index 0000000..070bca7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/CHANGES.md
@@ -0,0 +1,163 @@
+Change history of go-restful
+=
+2016-02-14
+- take the quality factor of the Accept header media type into account when deciding the content type of the response
+- add constructors for custom entity accessors for xml and json
+
+2015-09-27
+- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
+
+2015-09-25
+- fixed problem with changing Header after WriteHeader (issue 235)
+
+2015-09-14
+- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
+- added support for custom EntityReaderWriters.
+
+2015-08-06
+- add support for reading entities from compressed request content
+- use sync.Pool for compressors of http response and request body
+- add Description to Parameter for documentation in Swagger UI
+
+2015-03-20
+- add configurable logging
+
+2015-03-18
+- if not specified, the Operation is derived from the Route function
+
+2015-03-17
+- expose Parameter creation functions
+- make trace logger an interface
+- fix OPTIONSFilter
+- customize rendering of ServiceError
+- JSR311 router now handles wildcards
+- add Notes to Route
+
+2014-11-27
+- (api add) PrettyPrint per response. (as proposed in #167)
+
+2014-11-12
+- (api add) ApiVersion(.) for documentation in Swagger UI
+
+2014-11-10
+- (api change) struct fields tagged with "description" show up in Swagger UI
+
+2014-10-31
+- (api change) ReturnsError -> Returns
+- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
+- fix swagger nested structs
+- sort Swagger response messages by code
+
+2014-10-23
+- (api add) ReturnsError allows you to document Http codes in swagger
+- fixed problem with greedy CurlyRouter
+- (api add) Access-Control-Max-Age in CORS
+- add tracing functionality (injectable) for debugging purposes
+- support JSON parse 64bit int
+- fix empty parameters for swagger
+- WebServicesUrl is now optional for swagger
+- fixed duplicate AccessControlAllowOrigin in CORS
+- (api change) expose ServeMux in container
+- (api add) added AllowedDomains in CORS
+- (api add) ParameterNamed for detailed documentation
+
+2014-04-16
+- (api add) expose constructor of Request for testing.
+
+2014-06-27
+- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
+- (api add) SetCacheReadEntity allows control over whether or not the request body is cached (default true for compatibility reasons).
+
+2014-07-03
+- (api add) CORS can be configured with a list of allowed domains
+
+2014-03-12
+- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
+
+2014-02-26
+- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
+
+2014-02-17
+- (api change) renamed parameter constants (go-lint checks)
+
+2014-01-10
+ - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
+
+2014-01-07
+ - (api change) Write* methods in Response now return the error or nil.
+ - added example of serving HTML from a Go template.
+ - fixed comparing Allowed headers in CORS (is now case-insensitive)
+
+2013-11-13
+ - (api add) Response knows how many bytes are written to the response body.
+
+2013-10-29
+ - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
+
+2013-10-04
+ - (api add) Response knows what HTTP status has been written
+ - (api add) Request can have attributes (map of string->interface, also called request-scoped variables
+
+2013-09-12
+ - (api change) Router interface simplified
+ - Implemented CurlyRouter, a Router that does not use or allow regular expressions in paths
+
+2013-08-05
+ - add OPTIONS support
+ - add CORS support
+
+2013-08-27
+ - fixed some reported issues (see github)
+ - (api change) deprecated use of WriteError; use WriteErrorString instead
+
+2014-04-15
+ - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
+
+2013-08-08
+ - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
+ - (api add) the swagger package has been extended to have a UI per container.
+ - if panic is detected then a small stack trace is printed (thanks to runner-mei)
+ - (api add) WriteErrorString to Response
+
+Important API changes:
+
+ - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
+ - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
+
+
+2013-07-06
+
+ - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
+
+2013-06-19
+
+ - (improve) DoNotRecover option, moved request body closer, improved ReadEntity
+
+2013-06-03
+
+ - (api change) removed Dispatcher interface, hide PathExpression
+ - changed receiver names of type functions to be more idiomatic Go
+
+2013-06-02
+
+ - (optimize) Cache the RegExp compilation of Paths.
+
+2013-05-22
+
+ - (api add) Added support for request/response filter functions
+
+2013-05-18
+
+
+ - (api add) Added feature to change the default Http Request Dispatch function (travis cline)
+ - (api change) Moved Swagger Webservice to swagger package (see example restful-user)
+
+[2012-11-14 .. 2013-05-18>
+
+ - See https://github.com/emicklei/go-restful/commits
+
+2012-11-14
+
+ - Initial commit
+
+
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE b/src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE
new file mode 100644
index 0000000..ece7ec6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012,2013 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/README.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/README.md
new file mode 100644
index 0000000..e5492e4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/README.md
@@ -0,0 +1,74 @@
+go-restful
+==========
+
+package for building REST-style Web Services using Google Go
+
+REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
+
+- GET = Retrieve a representation of a resource
+- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
+- PUT = Create if you are sending the full content of the specified resource (URI).
+- PUT = Update if you are updating the full content of the specified resource.
+- DELETE = Delete if you are requesting the server to delete the resource
+- PATCH = Update partial content of a resource
+- OPTIONS = Get information about the communication options for the request URI
+
+### Example
+
+```Go
+ws := new(restful.WebService)
+ws.
+ Path("/users").
+ Consumes(restful.MIME_XML, restful.MIME_JSON).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ws.Route(ws.GET("/{user-id}").To(u.findUser).
+ Doc("get a user").
+ Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+ Writes(User{}))
+...
+
+func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+}
+```
+
+[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
+
+### Features
+
+- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
+- Configurable router:
+ - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
+ - Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
+- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
+- Response API for writing structs to JSON/XML and setting headers
+- Filters for intercepting the request &#8594; response flow on Service or Route level
+- Request-scoped variables using attributes
+- Containers for WebServices on different HTTP endpoints
+- Content encoding (gzip,deflate) of request and response payloads
+- Automatic responses on OPTIONS (using a filter)
+- Automatic CORS request handling (using a filter)
+- API declaration for Swagger UI (see swagger package)
+- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
+- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
+- Configurable (trace) logging
+- Customizable encoding using EntityReaderWriter registration
+- Customizable gzip/deflate readers and writers using CompressorProvider registration
+
+### Resources
+
+- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
+- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
+- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
+- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
+- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
+
+[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
+
+(c) 2012 - 2015, http://ernestmicklei.com. MIT License
+
+Type ```git shortlog -s``` for a full list of contributors.
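For reference, the README snippet above can be fleshed out into a self-contained program roughly as follows. This is a sketch, not part of the patch: the User type, port, and handler body are invented, and restful.Add plus Response.WriteEntity come from parts of the library outside this hunk.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// User is an illustrative payload type, not part of the library.
type User struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

type UserResource struct{}

func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	response.WriteEntity(User{ID: id, Name: "unknown"})
}

func main() {
	u := UserResource{}
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON, restful.MIME_XML).
		Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("/{user-id}").To(u.findUser).
		Doc("get a user").
		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
		Writes(User{}))

	restful.Add(ws) // register on the DefaultContainer
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```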
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile b/src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile
new file mode 100644
index 0000000..16fd186
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/Srcfile
@@ -0,0 +1 @@
+{"SkipDirs": ["examples"]}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh b/src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh
new file mode 100644
index 0000000..47ffbe4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/bench_test.sh
@@ -0,0 +1,10 @@
+#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
+
+go test -c
+./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
+./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
+
+#go tool pprof go-restful.test tmp.prof
+go tool pprof go-restful.test curly.prof
+
+
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go
new file mode 100644
index 0000000..220b377
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compress.go
@@ -0,0 +1,123 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bufio"
+ "compress/gzip"
+ "compress/zlib"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
+var EnableContentEncoding = false
+
+// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
+type CompressingResponseWriter struct {
+ writer http.ResponseWriter
+ compressor io.WriteCloser
+ encoding string
+}
+
+// Header is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) Header() http.Header {
+ return c.writer.Header()
+}
+
+// WriteHeader is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) WriteHeader(status int) {
+ c.writer.WriteHeader(status)
+}
+
+// Write is part of http.ResponseWriter interface
+// It is passed through the compressor
+func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
+ if c.isCompressorClosed() {
+ return -1, errors.New("Compressing error: tried to write data using closed compressor")
+ }
+ return c.compressor.Write(bytes)
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
+ return c.writer.(http.CloseNotifier).CloseNotify()
+}
+
+// Close the underlying compressor
+func (c *CompressingResponseWriter) Close() error {
+ if c.isCompressorClosed() {
+ return errors.New("Compressing error: tried to close already closed compressor")
+ }
+
+ c.compressor.Close()
+ if ENCODING_GZIP == c.encoding {
+ currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
+ }
+ if ENCODING_DEFLATE == c.encoding {
+ currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
+ }
+ // gc hint needed?
+ c.compressor = nil
+ return nil
+}
+
+func (c *CompressingResponseWriter) isCompressorClosed() bool {
+ return nil == c.compressor
+}
+
+// Hijack implements the Hijacker interface
+// This is especially useful when using Container.EnableContentEncoding
+// together with websockets (for instance gorilla/websocket)
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hijacker, ok := c.writer.(http.Hijacker)
+ if !ok {
+ return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
+// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
+func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
+ header := httpRequest.Header.Get(HEADER_AcceptEncoding)
+ gi := strings.Index(header, ENCODING_GZIP)
+ zi := strings.Index(header, ENCODING_DEFLATE)
+ // use in order of appearance
+ if gi == -1 {
+ return zi != -1, ENCODING_DEFLATE
+ } else if zi == -1 {
+ return gi != -1, ENCODING_GZIP
+ } else {
+ if gi < zi {
+ return true, ENCODING_GZIP
+ }
+ return true, ENCODING_DEFLATE
+ }
+}
+
+// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
+func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
+ httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
+ c := new(CompressingResponseWriter)
+ c.writer = httpWriter
+ var err error
+ if ENCODING_GZIP == encoding {
+ w := currentCompressorProvider.AcquireGzipWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_GZIP
+ } else if ENCODING_DEFLATE == encoding {
+ w := currentCompressorProvider.AcquireZlibWriter()
+ w.Reset(httpWriter)
+ c.compressor = w
+ c.encoding = ENCODING_DEFLATE
+ } else {
+ return nil, errors.New("Unknown encoding:" + encoding)
+ }
+ return c, err
+}
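Compression is opt-in: the container only consults Accept-Encoding when content encoding has been enabled, and then wraps the ResponseWriter in a CompressingResponseWriter that is closed after the route returns. A hedged sketch of how a caller would turn it on; DefaultContainer and restful.Add live in files outside this hunk, and the route and port are illustrative:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	// Opt in to gzip/deflate encoding of responses on the default container.
	restful.DefaultContainer.EnableContentEncoding(true)

	ws := new(restful.WebService)
	ws.Route(ws.GET("/ping").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("pong"))
	}))
	restful.Add(ws)

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```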
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go
new file mode 100644
index 0000000..ee42601
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_cache.go
@@ -0,0 +1,103 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
+// of writers and readers (resources).
+// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
+type BoundedCachedCompressors struct {
+ gzipWriters chan *gzip.Writer
+ gzipReaders chan *gzip.Reader
+ zlibWriters chan *zlib.Writer
+ writersCapacity int
+ readersCapacity int
+}
+
+// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
+ b := &BoundedCachedCompressors{
+ gzipWriters: make(chan *gzip.Writer, writersCapacity),
+ gzipReaders: make(chan *gzip.Reader, readersCapacity),
+ zlibWriters: make(chan *zlib.Writer, writersCapacity),
+ writersCapacity: writersCapacity,
+ readersCapacity: readersCapacity,
+ }
+ for ix := 0; ix < writersCapacity; ix++ {
+ b.gzipWriters <- newGzipWriter()
+ b.zlibWriters <- newZlibWriter()
+ }
+ for ix := 0; ix < readersCapacity; ix++ {
+ b.gzipReaders <- newGzipReader()
+ }
+ return b
+}
+
+// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
+ var writer *gzip.Writer
+ select {
+ case writer, _ = <-b.gzipWriters:
+ default:
+ // return a new unmanaged one
+ writer = newGzipWriter()
+ }
+ return writer
+}
+
+// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
+ // forget the unmanaged ones
+ if len(b.gzipWriters) < b.writersCapacity {
+ b.gzipWriters <- w
+ }
+}
+
+// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
+ var reader *gzip.Reader
+ select {
+ case reader, _ = <-b.gzipReaders:
+ default:
+ // return a new unmanaged one
+ reader = newGzipReader()
+ }
+ return reader
+}
+
+// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
+ // forget the unmanaged ones
+ if len(b.gzipReaders) < b.readersCapacity {
+ b.gzipReaders <- r
+ }
+}
+
+// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
+ var writer *zlib.Writer
+ select {
+ case writer, _ = <-b.zlibWriters:
+ default:
+ // return a new unmanaged one
+ writer = newZlibWriter()
+ }
+ return writer
+}
+
+// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
+ // forget the unmanaged ones
+ if len(b.zlibWriters) < b.writersCapacity {
+ b.zlibWriters <- w
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go
new file mode 100644
index 0000000..d866ce6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressor_pools.go
@@ -0,0 +1,91 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "compress/gzip"
+ "compress/zlib"
+ "sync"
+)
+
+// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
+type SyncPoolCompessors struct {
+ GzipWriterPool *sync.Pool
+ GzipReaderPool *sync.Pool
+ ZlibWriterPool *sync.Pool
+}
+
+// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
+func NewSyncPoolCompessors() *SyncPoolCompessors {
+ return &SyncPoolCompessors{
+ GzipWriterPool: &sync.Pool{
+ New: func() interface{} { return newGzipWriter() },
+ },
+ GzipReaderPool: &sync.Pool{
+ New: func() interface{} { return newGzipReader() },
+ },
+ ZlibWriterPool: &sync.Pool{
+ New: func() interface{} { return newZlibWriter() },
+ },
+ }
+}
+
+func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
+ return s.GzipWriterPool.Get().(*gzip.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
+ s.GzipWriterPool.Put(w)
+}
+
+func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
+ return s.GzipReaderPool.Get().(*gzip.Reader)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
+ s.GzipReaderPool.Put(r)
+}
+
+func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
+ return s.ZlibWriterPool.Get().(*zlib.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
+ s.ZlibWriterPool.Put(w)
+}
+
+func newGzipWriter() *gzip.Writer {
+ // create with an empty bytes writer; it will be replaced before using the gzipWriter
+ writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
+
+func newGzipReader() *gzip.Reader {
+ // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
+ // we can safely use currentCompressorProvider because it is set on package initialization.
+ w := currentCompressorProvider.AcquireGzipWriter()
+ defer currentCompressorProvider.ReleaseGzipWriter(w)
+ b := new(bytes.Buffer)
+ w.Reset(b)
+ w.Flush()
+ w.Close()
+ reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
+ if err != nil {
+ panic(err.Error())
+ }
+ return reader
+}
+
+func newZlibWriter() *zlib.Writer {
+ writer, err := zlib.NewWriterLevel(new(bytes.Buffer), zlib.BestSpeed)
+ if err != nil {
+ panic(err.Error())
+ }
+ return writer
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go
new file mode 100644
index 0000000..f028456
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/compressors.go
@@ -0,0 +1,53 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+)
+
+type CompressorProvider interface {
+ // Returns a *gzip.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireGzipWriter() *gzip.Writer
+
+ // Releases an acquired *gzip.Writer.
+ ReleaseGzipWriter(w *gzip.Writer)
+
+ // Returns a *gzip.Reader which needs to be released later.
+ AcquireGzipReader() *gzip.Reader
+
+ // Releases an acquired *gzip.Reader.
+ ReleaseGzipReader(w *gzip.Reader)
+
+ // Returns a *zlib.Writer which needs to be released later.
+ // Before using it, call Reset().
+ AcquireZlibWriter() *zlib.Writer
+
+ // Releases an acquired *zlib.Writer.
+ ReleaseZlibWriter(w *zlib.Writer)
+}
+
+// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
+var currentCompressorProvider CompressorProvider
+
+func init() {
+ currentCompressorProvider = NewSyncPoolCompessors()
+}
+
+// CurrentCompressorProvider returns the current CompressorProvider.
+// It is initialized using a SyncPoolCompessors.
+func CurrentCompressorProvider() CompressorProvider {
+ return currentCompressorProvider
+}
+
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
+func SetCompressorProvider(p CompressorProvider) {
+ if p == nil {
+ panic("cannot set compressor provider to nil")
+ }
+ currentCompressorProvider = p
+}
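Callers can swap the provider at startup, for example to the bounded cache from compressor_cache.go. A small sketch; the capacities are arbitrary:

```go
package main

import (
	"fmt"

	restful "github.com/emicklei/go-restful"
)

func main() {
	// Replace the default sync.Pool based provider with the bounded cache:
	// at most 20 gzip/zlib writers and 10 gzip readers are retained.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 10))

	fmt.Printf("%T\n", restful.CurrentCompressorProvider()) // *restful.BoundedCachedCompressors
}
```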
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go
new file mode 100644
index 0000000..203439c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/constants.go
@@ -0,0 +1,30 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
+
+ HEADER_Allow = "Allow"
+ HEADER_Accept = "Accept"
+ HEADER_Origin = "Origin"
+ HEADER_ContentType = "Content-Type"
+ HEADER_LastModified = "Last-Modified"
+ HEADER_AcceptEncoding = "Accept-Encoding"
+ HEADER_ContentEncoding = "Content-Encoding"
+ HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
+ HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
+ HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
+ HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
+ HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
+ HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
+ HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
+ HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
+
+ ENCODING_GZIP = "gzip"
+ ENCODING_DEFLATE = "deflate"
+)
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/container.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/container.go
new file mode 100644
index 0000000..62ded27
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/container.go
@@ -0,0 +1,361 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
+// The requests are further dispatched to routes of WebServices using a RouteSelector
+type Container struct {
+ webServicesLock sync.RWMutex
+ webServices []*WebService
+ ServeMux *http.ServeMux
+ isRegisteredOnRoot bool
+ containerFilters []FilterFunction
+ doNotRecover bool // default is false
+ recoverHandleFunc RecoverHandleFunction
+ serviceErrorHandleFunc ServiceErrorHandleFunction
+ router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
+ contentEncodingEnabled bool // default is false
+}
+
+// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
+func NewContainer() *Container {
+ return &Container{
+ webServices: []*WebService{},
+ ServeMux: http.NewServeMux(),
+ isRegisteredOnRoot: false,
+ containerFilters: []FilterFunction{},
+ doNotRecover: false,
+ recoverHandleFunc: logStackOnRecover,
+ serviceErrorHandleFunc: writeServiceError,
+ router: RouterJSR311{},
+ contentEncodingEnabled: false}
+}
+
+// RecoverHandleFunction declares functions that can be used to handle a panic situation.
+// The first argument is what recover() returns. The second must be used to communicate an error response.
+type RecoverHandleFunction func(interface{}, http.ResponseWriter)
+
+// RecoverHandler changes the default function (logStackOnRecover) to be called
+// when a panic is detected. DoNotRecover must have its default value (=false).
+func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
+ c.recoverHandleFunc = handler
+}
+
+// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
+// The first argument is the service error, the second is the request that resulted in the error and
+// the third must be used to communicate an error response.
+type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
+
+// ServiceErrorHandler changes the default function (writeServiceError) to be called
+// when a ServiceError is detected.
+func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
+ c.serviceErrorHandleFunc = handler
+}
+
+// DoNotRecover controls whether panics will be caught to return HTTP 500.
+// If set to true, Route functions are responsible for handling any error situation.
+// Default value is false = recover from panics. This has performance implications.
+func (c *Container) DoNotRecover(doNot bool) {
+ c.doNotRecover = doNot
+}
+
+// Router changes the default Router (currently RouterJSR311)
+func (c *Container) Router(aRouter RouteSelector) {
+ c.router = aRouter
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
+func (c *Container) EnableContentEncoding(enabled bool) {
+ c.contentEncodingEnabled = enabled
+}
+
+// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
+func (c *Container) Add(service *WebService) *Container {
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+
+ // if rootPath was not set then lazy initialize it
+ if len(service.rootPath) == 0 {
+ service.Path("/")
+ }
+
+ // cannot have duplicate root paths
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
+ os.Exit(1)
+ }
+ }
+
+ // If not registered on root then add specific mapping
+ if !c.isRegisteredOnRoot {
+ c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
+ }
+ c.webServices = append(c.webServices, service)
+ return c
+}
+
+// addHandler may set a new HandleFunc for the serveMux
+// this function must run inside the critical region protected by the webServicesLock.
+// returns true if the function was registered on root ("/")
+func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
+ pattern := fixedPrefixPath(service.RootPath())
+ // check if root path registration is needed
+ if "/" == pattern || "" == pattern {
+ serveMux.HandleFunc("/", c.dispatch)
+ return true
+ }
+ // detect if registration already exists
+ alreadyMapped := false
+ for _, each := range c.webServices {
+ if each.RootPath() == service.RootPath() {
+ alreadyMapped = true
+ break
+ }
+ }
+ if !alreadyMapped {
+ serveMux.HandleFunc(pattern, c.dispatch)
+ if !strings.HasSuffix(pattern, "/") {
+ serveMux.HandleFunc(pattern+"/", c.dispatch)
+ }
+ }
+ return false
+}
+
+func (c *Container) Remove(ws *WebService) error {
+ if c.ServeMux == http.DefaultServeMux {
+ errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
+ log.Printf(errMsg)
+ return errors.New(errMsg)
+ }
+ c.webServicesLock.Lock()
+ defer c.webServicesLock.Unlock()
+ // build a new ServeMux and re-register all WebServices
+ newServeMux := http.NewServeMux()
+ newServices := []*WebService{}
+ newIsRegisteredOnRoot := false
+ for _, each := range c.webServices {
+ if each.rootPath != ws.rootPath {
+ // If not registered on root then add specific mapping
+ if !newIsRegisteredOnRoot {
+ newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
+ }
+ newServices = append(newServices, each)
+ }
+ }
+ c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
+ return nil
+}
+
+// logStackOnRecover is the default RecoverHandleFunction and is called
+// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
+// Default implementation logs the stacktrace and writes the stacktrace on the response.
+// This may be a security issue as it exposes sourcecode information.
+func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
+ var buffer bytes.Buffer
+ buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
+ for i := 2; ; i += 1 {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
+ }
+ log.Print(buffer.String())
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ httpWriter.Write(buffer.Bytes())
+}
+
+// writeServiceError is the default ServiceErrorHandleFunction and is called
+// when a ServiceError is returned during route selection. Default implementation
+// calls resp.WriteErrorString(err.Code, err.Message)
+func writeServiceError(err ServiceError, req *Request, resp *Response) {
+ resp.WriteErrorString(err.Code, err.Message)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+ writer := httpWriter
+
+ // CompressingResponseWriter should be closed after all operations are done
+ defer func() {
+ if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+ compressWriter.Close()
+ }
+ }()
+
+ // Install panic recovery unless told otherwise
+ if !c.doNotRecover { // catch all for 500 response
+ defer func() {
+ if r := recover(); r != nil {
+ c.recoverHandleFunc(r, writer)
+ return
+ }
+ }()
+ }
+ // Install closing the request body (if any)
+ defer func() {
+ if nil != httpRequest.Body {
+ httpRequest.Body.Close()
+ }
+ }()
+
+ // Detect if compression is needed
+ // assume without compression, test for override
+ if c.contentEncodingEnabled {
+ doCompress, encoding := wantsCompressedResponse(httpRequest)
+ if doCompress {
+ var err error
+ writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+ if err != nil {
+ log.Print("[restful] unable to install compressor: ", err)
+ httpWriter.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ // Find best match Route ; err is non nil if no match was found
+ var webService *WebService
+ var route *Route
+ var err error
+ func() {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ webService, route, err = c.router.SelectRoute(
+ c.webServices,
+ httpRequest)
+ }()
+ if err != nil {
+ // a non-200 response has already been written
+ // run container filters anyway ; they should not touch the response...
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ switch err.(type) {
+ case ServiceError:
+ ser := err.(ServiceError)
+ c.serviceErrorHandleFunc(ser, req, resp)
+ }
+ // TODO
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
+ return
+ }
+ wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
+ // pass through filters (if any)
+ if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
+ // compose filter chain
+ allFilters := []FilterFunction{}
+ allFilters = append(allFilters, c.containerFilters...)
+ allFilters = append(allFilters, webService.filters...)
+ allFilters = append(allFilters, route.Filters...)
+ chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
+ // handle request by route after passing all filters
+ route.Function(wrappedRequest, wrappedResponse)
+ }}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // no filters, handle request by route
+ route.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
+func fixedPrefixPath(pathspec string) string {
+ varBegin := strings.Index(pathspec, "{")
+ if -1 == varBegin {
+ return pathspec
+ }
+ return pathspec[:varBegin]
+}
+
+// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
+func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
+ c.ServeMux.ServeHTTP(httpwriter, httpRequest)
+}
+
+// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
+func (c Container) Handle(pattern string, handler http.Handler) {
+ c.ServeMux.Handle(pattern, handler)
+}
+
+// HandleWithFilter registers the handler for the given pattern.
+// Container's filter chain is applied for handler.
+// If a handler already exists for pattern, HandleWithFilter panics.
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
+ f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
+ if len(c.containerFilters) == 0 {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ return
+ }
+
+ chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+ handler.ServeHTTP(httpResponse, httpRequest)
+ }}
+ chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
+ }
+
+ c.Handle(pattern, http.HandlerFunc(f))
+}
+
+// Filter appends a container FilterFunction. These are called before dispatching
+// a http.Request to a WebService from the container
+func (c *Container) Filter(filter FilterFunction) {
+ c.containerFilters = append(c.containerFilters, filter)
+}
+
+// RegisteredWebServices returns the collections of added WebServices
+func (c Container) RegisteredWebServices() []*WebService {
+ c.webServicesLock.RLock()
+ defer c.webServicesLock.RUnlock()
+ result := make([]*WebService, len(c.webServices))
+ for ix := range c.webServices {
+ result[ix] = c.webServices[ix]
+ }
+ return result
+}
+
+// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
+func (c Container) computeAllowedMethods(req *Request) []string {
+ // Go through all RegisteredWebServices() and all its Routes to collect the options
+ methods := []string{}
+ requestPath := req.Request.URL.Path
+ for _, ws := range c.RegisteredWebServices() {
+ matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ finalMatch := matches[len(matches)-1]
+ for _, rt := range ws.Routes() {
+ matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+ if lastMatch == "" || lastMatch == "/" { // include the method only when the remaining match is empty or "/"
+ methods = append(methods, rt.Method)
+ }
+ }
+ }
+ }
+ }
+ // methods = append(methods, "OPTIONS") not sure about this
+ return methods
+}
+
+// newBasicRequestResponse creates a pair of Request,Response from its http versions.
+// It is basic because no parameter or (produces) content-type information is given.
+func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ resp := NewResponse(httpWriter)
+ resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ return NewRequest(httpRequest), resp
+}
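Because a Container carries its own ServeMux and filter chain and implements http.Handler via ServeHTTP, it can be served on an endpoint of its own, independent of the DefaultContainer. A sketch; the path and port are invented:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/healthz")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("ok"))
	}))

	// A dedicated Container, registered on its own ServeMux.
	admin := restful.NewContainer()
	admin.Add(ws)

	log.Fatal(http.ListenAndServe(":9090", admin))
}
```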
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go
new file mode 100644
index 0000000..1efeef0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/cors_filter.go
@@ -0,0 +1,202 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
+// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
+// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
+//
+// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
+// http://enable-cors.org/server.html
+// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
+type CrossOriginResourceSharing struct {
+ ExposeHeaders []string // list of Header names
+ AllowedHeaders []string // list of Header names
+ AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
+ AllowedMethods []string
+ MaxAge int // number of seconds before requiring new Options request
+ CookiesAllowed bool
+ Container *Container
+
+ allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
+}
+
+// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
+// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
+func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if len(origin) == 0 {
+ if trace {
+ traceLogger.Print("no Http header Origin set")
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if !c.isOriginAllowed(origin) { // check whether this origin is allowed
+ if trace {
+ traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
+ }
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if req.Request.Method != "OPTIONS" {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
+ c.doPreflightRequest(req, resp)
+ } else {
+ c.doActualRequest(req, resp)
+ chain.ProcessFilter(req, resp)
+ return
+ }
+}
+
+func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
+ c.setOptionsHeaders(req, resp)
+ // continue processing the response
+}
+
+func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
+ if len(c.AllowedMethods) == 0 {
+ if c.Container == nil {
+ c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
+ } else {
+ c.AllowedMethods = c.Container.computeAllowedMethods(req)
+ }
+ }
+
+ acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
+ if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestMethod,
+ acrm,
+ c.AllowedMethods)
+ }
+ return
+ }
+ acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+ if len(acrhs) > 0 {
+ for _, each := range strings.Split(acrhs, ",") {
+ if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
+ if trace {
+ traceLogger.Printf("Http header %s:%s is not in %v",
+ HEADER_AccessControlRequestHeaders,
+ acrhs,
+ c.AllowedHeaders)
+ }
+ return
+ }
+ }
+ }
+ resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
+ resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
+ c.setOptionsHeaders(req, resp)
+
+ // return http 200 response, no body
+}
+
+func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
+ c.checkAndSetExposeHeaders(resp)
+ c.setAllowOriginHeader(req, resp)
+ c.checkAndSetAllowCredentials(resp)
+ if c.MaxAge > 0 {
+ resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
+ }
+}
+
+func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
+ if len(origin) == 0 {
+ return false
+ }
+ if len(c.AllowedDomains) == 0 {
+ return true
+ }
+
+ allowed := false
+ for _, domain := range c.AllowedDomains {
+ if domain == origin {
+ allowed = true
+ break
+ }
+ }
+
+ if !allowed {
+ if len(c.allowedOriginPatterns) == 0 {
+ // compile allowed domains to allowed origin patterns
+ allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
+ if err != nil {
+ return false
+ }
+ c.allowedOriginPatterns = allowedOriginRegexps
+ }
+
+ for _, pattern := range c.allowedOriginPatterns {
+ if allowed = pattern.MatchString(origin); allowed {
+ break
+ }
+ }
+ }
+
+ return allowed
+}
+
+func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
+ origin := req.Request.Header.Get(HEADER_Origin)
+ if c.isOriginAllowed(origin) {
+ resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
+ if len(c.ExposeHeaders) > 0 {
+ resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
+ }
+}
+
+func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
+ if c.CookiesAllowed {
+ resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
+ }
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
+ for _, each := range allowedMethods {
+ if each == method {
+ return true
+ }
+ }
+ return false
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
+ for _, each := range c.AllowedHeaders {
+ if strings.ToLower(each) == strings.ToLower(header) {
+ return true
+ }
+ }
+ return false
+}
+
+// Take a list of strings and compile them into a list of regular expressions.
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+ regexps := []*regexp.Regexp{}
+ for _, regexpStr := range regexpStrings {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return regexps, err
+ }
+ regexps = append(regexps, r)
+ }
+ return regexps, nil
+}
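A typical configuration builds a CrossOriginResourceSharing value and installs its Filter on the container; preflight OPTIONS requests that carry Access-Control-Request-Method are then answered by doPreflightRequest. A sketch with invented origins, route, and port; restful.Add and DefaultContainer are defined elsewhere in the library:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/data")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("[]")) // illustrative empty JSON array
	}))
	restful.Add(ws)

	cors := restful.CrossOriginResourceSharing{
		AllowedHeaders: []string{"Content-Type", "Accept"},
		AllowedDomains: []string{"http://example.com"}, // an empty list allows every origin
		AllowedMethods: []string{"GET", "POST"},
		CookiesAllowed: false,
		Container:      restful.DefaultContainer,
	}
	restful.DefaultContainer.Filter(cors.Filter)

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```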
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh b/src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh
new file mode 100644
index 0000000..e27dbf1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/coverage.sh
@@ -0,0 +1,2 @@
+go test -coverprofile=coverage.out
+go tool cover -html=coverage.out \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go
new file mode 100644
index 0000000..185300d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly.go
@@ -0,0 +1,162 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
+type CurlyRouter struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (c CurlyRouter) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
+
+ requestTokens := tokenizePath(httpRequest.URL.Path)
+
+ detectedService := c.detectWebService(requestTokens, webServices)
+ if detectedService == nil {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
+ }
+ return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ candidateRoutes := c.selectRoutes(detectedService, requestTokens)
+ if len(candidateRoutes) == 0 {
+ if trace {
+ traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
+ }
+ return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+ selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
+ if selectedRoute == nil {
+ return detectedService, nil, err
+ }
+ return detectedService, selectedRoute, nil
+}
+
+// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request.
+func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
+ candidates := sortableCurlyRoutes{}
+ for _, each := range ws.routes {
+ matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+ if matches {
+ candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ }
+ }
+ sort.Sort(sort.Reverse(candidates))
+ return candidates
+}
+
+// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and how many static path elements there are.
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+ if len(routeTokens) < len(requestTokens) {
+ // proceed in matching only if last routeToken is wildcard
+ count := len(routeTokens)
+ if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
+ return false, 0, 0
+ }
+ // proceed
+ }
+ for i, routeToken := range routeTokens {
+ if i == len(requestTokens) {
+ // reached end of request path
+ return false, 0, 0
+ }
+ requestToken := requestTokens[i]
+ if strings.HasPrefix(routeToken, "{") {
+ paramCount++
+ if colon := strings.Index(routeToken, ":"); colon != -1 {
+ // match by regex
+ matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
+ if !matchesToken {
+ return false, 0, 0
+ }
+ if matchesRemainder {
+ break
+ }
+ }
+ } else { // no { prefix
+ if requestToken != routeToken {
+ return false, 0, 0
+ }
+ staticCount++
+ }
+ }
+ return true, paramCount, staticCount
+}
+
+// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
+// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
+func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
+ regPart := routeToken[colon+1 : len(routeToken)-1]
+ if regPart == "*" {
+ if trace {
+ traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
+ }
+ return true, true
+ }
+ matched, err := regexp.MatchString(regPart, requestToken)
+ return (matched && err == nil), false
+}
+
+// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
+// headers of the Request. See also RouterJSR311 in jsr311.go
+func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
+ // tracing is done inside detectRoute
+ return RouterJSR311{}.detectRoute(candidateRoutes.routes(), httpRequest)
+}
+
+// detectWebService returns the best matching webService given the list of path tokens.
+// see also computeWebserviceScore
+func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
+ var best *WebService
+ score := -1
+ for _, each := range webServices {
+ matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+ if matches && (eachScore > score) {
+ best = each
+ score = eachScore
+ }
+ }
+ return best
+}
+
+// computeWebserviceScore returns whether tokens match and
+// the weighted score of the longest matching consecutive tokens from the beginning.
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
+ if len(tokens) > len(requestTokens) {
+ return false, 0
+ }
+ score := 0
+ for i := 0; i < len(tokens); i++ {
+ each := requestTokens[i]
+ other := tokens[i]
+ if len(each) == 0 && len(other) == 0 {
+ score++
+ continue
+ }
+ if len(other) > 0 && strings.HasPrefix(other, "{") {
+ // no empty match
+ if len(each) == 0 {
+ return false, score
+ }
+ score += 1
+ } else {
+ // not a parameter
+ if each != other {
+ return false, score
+ }
+ score += (len(tokens) - i) * 10 //fuzzy
+ }
+ }
+ return true, score
+}
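
A brief usage sketch of the router defined above: curly-bracket tokens may carry a regular expression or the * wildcard, and the CurlyRouter has to be selected on the container explicitly (the JSR311 router is the default). The handler, paths and port below are illustrative only:

    package main

    import (
        "log"
        "net/http"

        "github.com/emicklei/go-restful"
    )

    func lookup(req *restful.Request, resp *restful.Response) {
        resp.WriteEntity(map[string]string{
            "code": req.PathParameter("code"), // e.g. "1234AB"
            "rest": req.PathParameter("rest"), // remainder captured by the * wildcard
        })
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/postal").Produces(restful.MIME_JSON)
        ws.Route(ws.GET("/{code:[0-9]{4}[A-Z]{2}}/{rest:*}").To(lookup))

        container := restful.NewContainer()
        container.Router(restful.CurlyRouter{}) // regex and wildcard tokens require the CurlyRouter
        container.Add(ws)
        log.Fatal(http.ListenAndServe(":8080", container))
    }
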
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go
new file mode 100644
index 0000000..296f946
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/curly_route.go
@@ -0,0 +1,52 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
+type curlyRoute struct {
+ route Route
+ paramCount int
+ staticCount int
+}
+
+type sortableCurlyRoutes []curlyRoute
+
+func (s *sortableCurlyRoutes) add(route curlyRoute) {
+ *s = append(*s, route)
+}
+
+func (s sortableCurlyRoutes) routes() (routes []Route) {
+ for _, each := range s {
+ routes = append(routes, each.route) // TODO change return type
+ }
+ return routes
+}
+
+func (s sortableCurlyRoutes) Len() int {
+ return len(s)
+}
+func (s sortableCurlyRoutes) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s sortableCurlyRoutes) Less(i, j int) bool {
+ ci := s[i]
+ cj := s[j]
+
+ // primary key
+ if ci.staticCount < cj.staticCount {
+ return true
+ }
+ if ci.staticCount > cj.staticCount {
+ return false
+ }
+ // secondary key
+ if ci.paramCount < cj.paramCount {
+ return true
+ }
+ if ci.paramCount > cj.paramCount {
+ return false
+ }
+ return ci.route.Path < cj.route.Path
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go
new file mode 100644
index 0000000..d40405b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/doc.go
@@ -0,0 +1,196 @@
+/*
+Package restful, a lean package for creating REST-style WebServices without magic.
+
+WebServices and Routes
+
+A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
+Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
+WebServices must be added to a container (see below) in order to handle Http requests from a server.
+
+A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
+This package has the logic to find the best matching Route and if found, call its Function.
+
+ ws := new(restful.WebService)
+ ws.
+ Path("/users").
+ Consumes(restful.MIME_JSON, restful.MIME_XML).
+ Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
+
+ ...
+
+ // GET http://localhost:8080/users/1
+ func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+ id := request.PathParameter("user-id")
+ ...
+ }
+
+The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
+
+Regular expression matching Routes
+
+A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
+For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
+Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
+This feature requires the use of a CurlyRouter.
+
+Containers
+
+A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
+Using the statements "restful.Add(...)" and "restful.Filter(...)" will register WebServices and Filters to the Default Container.
+The Default container of go-restful uses the http.DefaultServeMux.
+You can create your own Container and create a new http.Server for that particular container.
+
+ container := restful.NewContainer()
+ server := &http.Server{Addr: ":8081", Handler: container}
+
+Filters
+
+A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
+You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
+In the restful package there are three hooks into the request,response flow where filters can be added.
+Each filter must define a FilterFunction:
+
+ func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
+
+Use the following statement to pass the request,response pair to the next filter or RouteFunction
+
+ chain.ProcessFilter(req, resp)
+
+Container Filters
+
+These are processed before any registered WebService.
+
+ // install a (global) filter for the default container (processed before any webservice)
+ restful.Filter(globalLogging)
+
+WebService Filters
+
+These are processed before any Route of a WebService.
+
+ // install a webservice filter (processed before any route)
+ ws.Filter(webserviceLogging).Filter(measureTime)
+
+
+Route Filters
+
+These are processed before calling the function associated with the Route.
+
+ // install 2 chained route filters (processed before calling findUser)
+ ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
+
+Response Encoding
+
+Two encodings are supported: gzip and deflate. To enable this for all responses:
+
+ restful.DefaultContainer.EnableContentEncoding(true)
+
+If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
+Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
+
+OPTIONS support
+
+By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
+
+ Filter(OPTIONSFilter())
+
+CORS
+
+By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
+
+ cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
+ Filter(cors.Filter)
+
+Error Handling
+
+Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
+For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
+
+ 400: Bad Request
+
+If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
+
+ 404: Not Found
+
+Despite a valid URI, the resource requested may not be available; in that case use http.StatusNotFound.
+
+ 500: Internal Server Error
+
+If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
+
+ 405: Method Not Allowed
+
+The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
+
+ 406: Not Acceptable
+
+The request does not have or has an unknown Accept Header set for this operation.
+
+ 415: Unsupported Media Type
+
+The request does not have or has an unknown Content-Type Header set for this operation.
+
+ServiceError
+
+In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
+
+Performance options
+
+This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
+
+ restful.DefaultContainer.Router(CurlyRouter{})
+
+The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
+However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time.
+The CurlyRouter implementation is more lightweight and also allows you to use wildcards and expressions, but only if needed.
+
+ restful.DefaultContainer.DoNotRecover(true)
+
+DoNotRecover controls whether panics will be caught to return HTTP 500.
+If set to true, Route functions are responsible for handling any error situation.
+Default value is false; it will recover from panics. This has performance implications.
+
+ restful.SetCacheReadEntity(false)
+
+SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
+If you expect to read large amounts of payload data and do not need repeatable reads, you should set it to false.
+
+ restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
+
+If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
+Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation.
+
+Troubleshooting
+
+This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
+Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger) such as:
+
+ restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
+
+Logging
+
+The restful.SetLogger() method allows you to override the logger used by the package. By default restful
+uses the standard library `log` package and logs to stdout. Different logging packages are supported as
+long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
+preferred package is simple.
+
+Resources
+
+[project]: https://github.com/emicklei/go-restful
+
+[examples]: https://github.com/emicklei/go-restful/blob/master/examples
+
+[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
+
+[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
+
+(c) 2012-2015, http://ernestmicklei.com. MIT License
+*/
+package restful
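
As a hedged illustration of the error-handling guidance above, a route function can report the listed status codes with WriteErrorString, or wrap them in a ServiceError via NewError. The UserResource type and its in-memory lookup are placeholders, not part of the package:

    package example

    import (
        "net/http"

        "github.com/emicklei/go-restful"
    )

    // UserResource is a placeholder resource with an in-memory store.
    type UserResource struct {
        users map[string]string
    }

    func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
        id := request.PathParameter("user-id")
        name, found := u.users[id]
        if !found {
            // 404: the URI was valid but the resource is not available
            response.WriteServiceError(http.StatusNotFound,
                restful.NewError(http.StatusNotFound, "user "+id+" not found"))
            return
        }
        if err := response.WriteEntity(map[string]string{"id": id, "name": name}); err != nil {
            // 500: the application could not write the response
            response.WriteErrorString(http.StatusInternalServerError, err.Error())
        }
    }
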
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go
new file mode 100644
index 0000000..6ecf6c7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/entity_accessors.go
@@ -0,0 +1,163 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "strings"
+ "sync"
+)
+
+// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
+type EntityReaderWriter interface {
+ // Read a serialized version of the value from the request.
+ // The Request may have a decompressing reader. Depends on Content-Encoding.
+ Read(req *Request, v interface{}) error
+
+ // Write a serialized version of the value on the response.
+ // The Response may have a compressing writer. Depends on Accept-Encoding.
+ // status should be a valid Http Status code
+ Write(resp *Response, status int, v interface{}) error
+}
+
+// entityAccessRegistry is a singleton
+var entityAccessRegistry = &entityReaderWriters{
+ protection: new(sync.RWMutex),
+ accessors: map[string]EntityReaderWriter{},
+}
+
+// entityReaderWriters associates MIME to an EntityReaderWriter
+type entityReaderWriters struct {
+ protection *sync.RWMutex
+ accessors map[string]EntityReaderWriter
+}
+
+func init() {
+ RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
+ RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
+}
+
+// RegisterEntityAccessor adds/overrides the ReaderWriter for encoding content with this MIME type.
+func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
+ entityAccessRegistry.protection.Lock()
+ defer entityAccessRegistry.protection.Unlock()
+ entityAccessRegistry.accessors[mime] = erw
+}
+
+// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
+// This package is already initialized with such an accessor using the MIME_JSON contentType.
+func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
+ return entityJSONAccess{ContentType: contentType}
+}
+
+// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
+// This package is already initialized with such an accessor using the MIME_XML contentType.
+func NewEntityAccessorXML(contentType string) EntityReaderWriter {
+ return entityXMLAccess{ContentType: contentType}
+}
+
+// accessorAt returns the registered ReaderWriter for this MIME type.
+func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
+ r.protection.RLock()
+ defer r.protection.RUnlock()
+ er, ok := r.accessors[mime]
+ if !ok {
+ // retry with reverse lookup
+ // more expensive but we are in an exceptional situation anyway
+ for k, v := range r.accessors {
+ if strings.Contains(mime, k) {
+ return v, true
+ }
+ }
+ }
+ return er, ok
+}
+
+// entityXMLAccess is a EntityReaderWriter for XML encoding
+type entityXMLAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshalls the value from XML
+func (e entityXMLAccess) Read(req *Request, v interface{}) error {
+ return xml.NewDecoder(req.Request.Body).Decode(v)
+}
+
+// Write marshals the value to XML and sets the Content-Type Header.
+func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeXML(resp, status, e.ContentType, v)
+}
+
+// writeXML marshals the value to XML and sets the Content-Type Header.
+func writeXML(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := xml.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write([]byte(xml.Header))
+ if err != nil {
+ return err
+ }
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return xml.NewEncoder(resp).Encode(v)
+}
+
+// entityJSONAccess is a EntityReaderWriter for JSON encoding
+type entityJSONAccess struct {
+ // This is used for setting the Content-Type header when writing
+ ContentType string
+}
+
+// Read unmarshalls the value from JSON
+func (e entityJSONAccess) Read(req *Request, v interface{}) error {
+ decoder := json.NewDecoder(req.Request.Body)
+ decoder.UseNumber()
+ return decoder.Decode(v)
+}
+
+// Write marshalls the value to JSON and set the Content-Type Header.
+func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
+ return writeJSON(resp, status, e.ContentType, v)
+}
+
+// writeJSON marshals the value to JSON and sets the Content-Type Header.
+func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
+ if v == nil {
+ resp.WriteHeader(status)
+ // do not write a nil representation
+ return nil
+ }
+ if resp.prettyPrint {
+ // pretty output must be created and written explicitly
+ output, err := json.MarshalIndent(v, " ", " ")
+ if err != nil {
+ return err
+ }
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ _, err = resp.Write(output)
+ return err
+ }
+ // not-so-pretty
+ resp.Header().Set(HEADER_ContentType, contentType)
+ resp.WriteHeader(status)
+ return json.NewEncoder(resp).Encode(v)
+}
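
RegisterEntityAccessor above is the hook for content types beyond the built-in JSON and XML accessors. A common pattern is to reuse the JSON accessor for a vendor-specific MIME type; the MIME string below is made up for illustration:

    package example

    import "github.com/emicklei/go-restful"

    // mimeUserV1 is a made-up vendor MIME type used only for illustration.
    const mimeUserV1 = "application/vnd.example.user.v1+json"

    func init() {
        // reuse the JSON accessor so routes that Consume/Produce mimeUserV1
        // round-trip entities through encoding/json
        restful.RegisterEntityAccessor(mimeUserV1, restful.NewEntityAccessorJSON(mimeUserV1))
    }
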
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go
new file mode 100644
index 0000000..4b86656
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/filter.go
@@ -0,0 +1,26 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
+type FilterChain struct {
+ Filters []FilterFunction // ordered list of FilterFunction
+ Index int // index into filters that is currently in progress
+ Target RouteFunction // function to call after passing all filters
+}
+
+// ProcessFilter passes the request,response pair through the next of Filters.
+// Each filter can decide to proceed to the next Filter or handle the Response itself.
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
+ if f.Index < len(f.Filters) {
+ f.Index++
+ f.Filters[f.Index-1](request, response, f)
+ } else {
+ f.Target(request, response)
+ }
+}
+
+// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
+type FilterFunction func(*Request, *Response, *FilterChain)
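
A sketch of a FilterFunction matching the signature above: it times the downstream processing and must call chain.ProcessFilter exactly once to hand control over. Function names are illustrative:

    package example

    import (
        "log"
        "time"

        "github.com/emicklei/go-restful"
    )

    // timing measures how long the rest of the chain (filters plus RouteFunction) takes.
    func timing(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
        start := time.Now()
        chain.ProcessFilter(req, resp) // hand control to the next filter or the RouteFunction
        log.Printf("%s %s -> %d in %v",
            req.Request.Method, req.Request.URL.Path, resp.StatusCode(), time.Since(start))
    }

    func install() {
        restful.Filter(timing) // container-wide; ws.Filter(...) and route-level .Filter(...) take the same signature
    }
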
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh b/src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh
new file mode 100644
index 0000000..5fe03b5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/install.sh
@@ -0,0 +1,9 @@
+cd examples
+ ls *.go | xargs -I {} go build -o /tmp/ignore {}
+ cd ..
+go fmt ...swagger && \
+go test -test.v ...swagger && \
+go install ...swagger && \
+go fmt ...restful && \
+go test -test.v ...restful && \
+go install ...restful \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go
new file mode 100644
index 0000000..511444a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/jsr311.go
@@ -0,0 +1,248 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "sort"
+)
+
+// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
+// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
+// RouterJSR311 implements the Router interface.
+// Concept of locators is not implemented.
+type RouterJSR311 struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (r RouterJSR311) SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
+
+ // Identify the root resource class (WebService)
+ dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
+ if err != nil {
+ return nil, nil, NewError(http.StatusNotFound, "")
+ }
+ // Obtain the set of candidate methods (Routes)
+ routes := r.selectRoutes(dispatcher, finalMatch)
+ if len(routes) == 0 {
+ return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+ }
+
+ // Identify the method (Route) that will handle the request
+ route, ok := r.detectRoute(routes, httpRequest)
+ return dispatcher, route, ok
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
+ // http method
+ methodOk := []Route{}
+ for _, each := range routes {
+ if httpRequest.Method == each.Method {
+ methodOk = append(methodOk, each)
+ }
+ }
+ if len(methodOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
+ }
+ return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
+ }
+
+ // content-type
+ contentType := httpRequest.Header.Get(HEADER_ContentType)
+ inputMediaOk := []Route{}
+ for _, each := range methodOk {
+ if each.matchesContentType(contentType) {
+ inputMediaOk = append(inputMediaOk, each)
+ }
+ }
+ if len(inputMediaOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
+ }
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
+ }
+
+ // accept
+ outputMediaOk := []Route{}
+ accept := httpRequest.Header.Get(HEADER_Accept)
+ if len(accept) == 0 {
+ accept = "*/*"
+ }
+ for _, each := range inputMediaOk {
+ if each.matchesAccept(accept) {
+ outputMediaOk = append(outputMediaOk, each)
+ }
+ }
+ if len(outputMediaOk) == 0 {
+ if trace {
+ traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
+ }
+ return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
+ }
+ // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+ return &outputMediaOk[0], nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// n/m > n/* > */*
+func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
+ // TODO
+ return &routes[0]
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
+func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
+ filtered := &sortableRouteCandidates{}
+ for _, each := range dispatcher.Routes() {
+ pathExpr := each.pathExpr
+ matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
+ if matches != nil {
+ lastMatch := matches[len(matches)-1]
+ if len(lastMatch) == 0 || lastMatch == "/" { // only include if the remainder is empty or '/'
+ filtered.candidates = append(filtered.candidates,
+ routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
+ }
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
+ }
+ return []Route{}
+ }
+ sort.Sort(sort.Reverse(filtered))
+
+ // select other routes from candidates whose expression also matches the path remainder
+ matchingRoutes := []Route{filtered.candidates[0].route}
+ for c := 1; c < len(filtered.candidates); c++ {
+ each := filtered.candidates[c]
+ if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
+ matchingRoutes = append(matchingRoutes, each.route)
+ }
+ }
+ return matchingRoutes
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
+func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
+ filtered := &sortableDispatcherCandidates{}
+ for _, each := range dispatchers {
+ matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
+ if matches != nil {
+ filtered.candidates = append(filtered.candidates,
+ dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
+ }
+ }
+ if len(filtered.candidates) == 0 {
+ if trace {
+ traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
+ }
+ return nil, "", errors.New("not found")
+ }
+ sort.Sort(sort.Reverse(filtered))
+ return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
+}
+
+// Types and functions to support the sorting of Routes
+
+type routeCandidate struct {
+ route Route
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not '([^/]+?)')
+}
+
+func (r routeCandidate) expressionToMatch() string {
+ return r.route.pathExpr.Source
+}
+
+func (r routeCandidate) String() string {
+ return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
+}
+
+type sortableRouteCandidates struct {
+ candidates []routeCandidate
+}
+
+func (rcs *sortableRouteCandidates) Len() int {
+ return len(rcs.candidates)
+}
+func (rcs *sortableRouteCandidates) Swap(i, j int) {
+ rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
+}
+func (rcs *sortableRouteCandidates) Less(i, j int) bool {
+ ci := rcs.candidates[i]
+ cj := rcs.candidates[j]
+ // primary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // secondary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // tertiary key
+ if ci.nonDefaultCount < cj.nonDefaultCount {
+ return true
+ }
+ if ci.nonDefaultCount > cj.nonDefaultCount {
+ return false
+ }
+ // quaternary key ("source" is interpreted as Path)
+ return ci.route.Path < cj.route.Path
+}
+
+// Types and functions to support the sorting of Dispatchers
+
+type dispatcherCandidate struct {
+ dispatcher *WebService
+ finalMatch string
+ matchesCount int // the number of capturing groups
+ literalCount int // the number of literal characters (means those not resulting from template variable substitution)
+ nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not '([^/]+?)')
+}
+type sortableDispatcherCandidates struct {
+ candidates []dispatcherCandidate
+}
+
+func (dc *sortableDispatcherCandidates) Len() int {
+ return len(dc.candidates)
+}
+func (dc *sortableDispatcherCandidates) Swap(i, j int) {
+ dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
+}
+func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
+ ci := dc.candidates[i]
+ cj := dc.candidates[j]
+ // primary key
+ if ci.matchesCount < cj.matchesCount {
+ return true
+ }
+ if ci.matchesCount > cj.matchesCount {
+ return false
+ }
+ // secondary key
+ if ci.literalCount < cj.literalCount {
+ return true
+ }
+ if ci.literalCount > cj.literalCount {
+ return false
+ }
+ // tertiary key
+ return ci.nonDefaultCount < cj.nonDefaultCount
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go
new file mode 100644
index 0000000..f70d895
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/log/log.go
@@ -0,0 +1,31 @@
+package log
+
+import (
+ stdlog "log"
+ "os"
+)
+
+// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+}
+
+var Logger StdLogger
+
+func init() {
+ // default Logger
+ SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
+}
+
+func SetLogger(customLogger StdLogger) {
+ Logger = customLogger
+}
+
+func Print(v ...interface{}) {
+ Logger.Print(v...)
+}
+
+func Printf(format string, v ...interface{}) {
+ Logger.Printf(format, v...)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go
new file mode 100644
index 0000000..3f1c4db
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/logger.go
@@ -0,0 +1,32 @@
+package restful
+
+// Copyright 2014 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+import (
+ "github.com/emicklei/go-restful/log"
+)
+
+var trace bool = false
+var traceLogger log.StdLogger
+
+func init() {
+ traceLogger = log.Logger // use the package logger by default
+}
+
+// TraceLogger enables detailed logging of Http request matching and filter invocation. Tracing is off by default.
+// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
+func TraceLogger(logger log.StdLogger) {
+ traceLogger = logger
+ EnableTracing(logger != nil)
+}
+
+// expose the setter for the global logger on the top-level package
+func SetLogger(customLogger log.StdLogger) {
+ log.SetLogger(customLogger)
+}
+
+// EnableTracing can be used to turn trace logging on and off.
+func EnableTracing(enabled bool) {
+ trace = enabled
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go
new file mode 100644
index 0000000..d7ea2b6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/mime.go
@@ -0,0 +1,45 @@
+package restful
+
+import (
+ "strconv"
+ "strings"
+)
+
+type mime struct {
+ media string
+ quality float64
+}
+
+// insertMime adds a mime to a list and keeps it sorted by quality.
+func insertMime(l []mime, e mime) []mime {
+ for i, each := range l {
+ // insert e before the first entry with a lower quality
+ if e.quality > each.quality {
+ left := append([]mime{}, l[0:i]...)
+ return append(append(left, e), l[i:]...)
+ }
+ }
+ return append(l, e)
+}
+
+// sortedMimes returns a list of mime sorted (desc) by its specified quality.
+func sortedMimes(accept string) (sorted []mime) {
+ for _, each := range strings.Split(accept, ",") {
+ typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
+ if len(typeAndQuality) == 1 {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+ } else {
+ // take factor
+ parts := strings.Split(typeAndQuality[1], "=")
+ if len(parts) == 2 {
+ f, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ traceLogger.Printf("unable to parse quality in %s, %v", each, err)
+ } else {
+ sorted = insertMime(sorted, mime{typeAndQuality[0], f})
+ }
+ }
+ }
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go
new file mode 100644
index 0000000..4514ead
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/options_filter.go
@@ -0,0 +1,26 @@
+package restful
+
+import "strings"
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// As for any filter, you can also install it for a particular WebService within a Container.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
+ if "OPTIONS" != req.Request.Method {
+ chain.ProcessFilter(req, resp)
+ return
+ }
+ resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
+}
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func OPTIONSFilter() FilterFunction {
+ return DefaultContainer.OPTIONSFilter
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go
new file mode 100644
index 0000000..e11c816
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/parameter.go
@@ -0,0 +1,114 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+ // PathParameterKind = indicator of Request parameter type "path"
+ PathParameterKind = iota
+
+ // QueryParameterKind = indicator of Request parameter type "query"
+ QueryParameterKind
+
+ // BodyParameterKind = indicator of Request parameter type "body"
+ BodyParameterKind
+
+ // HeaderParameterKind = indicator of Request parameter type "header"
+ HeaderParameterKind
+
+ // FormParameterKind = indicator of Request parameter type "form"
+ FormParameterKind
+)
+
+// Parameter is for documenting the parameter used in a Http Request
+// ParameterData kinds are Path,Query and Body
+type Parameter struct {
+ data *ParameterData
+}
+
+// ParameterData represents the state of a Parameter.
+// It is made public to make it accessible to e.g. the Swagger package.
+type ParameterData struct {
+ Name, Description, DataType, DataFormat string
+ Kind int
+ Required bool
+ AllowableValues map[string]string
+ AllowMultiple bool
+ DefaultValue string
+}
+
+// Data returns the state of the Parameter
+func (p *Parameter) Data() ParameterData {
+ return *p.data
+}
+
+// Kind returns the parameter type indicator (see const for valid values)
+func (p *Parameter) Kind() int {
+ return p.data.Kind
+}
+
+func (p *Parameter) bePath() *Parameter {
+ p.data.Kind = PathParameterKind
+ return p
+}
+func (p *Parameter) beQuery() *Parameter {
+ p.data.Kind = QueryParameterKind
+ return p
+}
+func (p *Parameter) beBody() *Parameter {
+ p.data.Kind = BodyParameterKind
+ return p
+}
+
+func (p *Parameter) beHeader() *Parameter {
+ p.data.Kind = HeaderParameterKind
+ return p
+}
+
+func (p *Parameter) beForm() *Parameter {
+ p.data.Kind = FormParameterKind
+ return p
+}
+
+// Required sets the required field and returns the receiver
+func (p *Parameter) Required(required bool) *Parameter {
+ p.data.Required = required
+ return p
+}
+
+// AllowMultiple sets the allowMultiple field and returns the receiver
+func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
+ p.data.AllowMultiple = multiple
+ return p
+}
+
+// AllowableValues sets the allowableValues field and returns the receiver
+func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
+ p.data.AllowableValues = values
+ return p
+}
+
+// DataType sets the dataType field and returns the receiver
+func (p *Parameter) DataType(typeName string) *Parameter {
+ p.data.DataType = typeName
+ return p
+}
+
+// DataFormat sets the dataFormat field for Swagger UI
+func (p *Parameter) DataFormat(formatName string) *Parameter {
+ p.data.DataFormat = formatName
+ return p
+}
+
+// DefaultValue sets the default value field and returns the receiver
+func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
+ p.data.DefaultValue = stringRepresentation
+ return p
+}
+
+// Description sets the description value field and returns the receiver
+func (p *Parameter) Description(doc string) *Parameter {
+ p.data.Description = doc
+ return p
+}
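
The Parameter builder above is normally created through the WebService helpers (PathParameter, QueryParameter, and friends) and attached to a route for documentation purposes. A hedged sketch; the service path, parameter names and handler are illustrative:

    package example

    import "github.com/emicklei/go-restful"

    // newUserService shows Parameter objects being built through the WebService helpers
    // and attached to a route for documentation; findUser is supplied by the caller.
    func newUserService(findUser restful.RouteFunction) *restful.WebService {
        ws := new(restful.WebService)
        ws.Path("/users").Produces(restful.MIME_JSON)
        ws.Route(ws.GET("/{user-id}").To(findUser).
            Doc("get a user by its id").
            Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
            Param(ws.QueryParameter("verbose", "include extra detail").DataType("boolean").DefaultValue("false")))
        return ws
    }
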
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go
new file mode 100644
index 0000000..a921e6f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/path_expression.go
@@ -0,0 +1,69 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// pathExpression holds a compiled path expression (RegExp) needed to match against
+// Http request paths and to extract path parameter values.
+type pathExpression struct {
+ LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
+ VarCount int // the number of named parameters (enclosed by {}) in the path
+ Matcher *regexp.Regexp
+ Source string // Path as defined by the RouteBuilder
+ tokens []string
+}
+
+// newPathExpression creates a pathExpression from the input URL path.
+// Returns an error if the path is invalid.
+func newPathExpression(path string) (*pathExpression, error) {
+ expression, literalCount, varCount, tokens := templateToRegularExpression(path)
+ compiled, err := regexp.Compile(expression)
+ if err != nil {
+ return nil, err
+ }
+ return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
+func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
+ var buffer bytes.Buffer
+ buffer.WriteString("^")
+ //tokens = strings.Split(template, "/")
+ tokens = tokenizePath(template)
+ for _, each := range tokens {
+ if each == "" {
+ continue
+ }
+ buffer.WriteString("/")
+ if strings.HasPrefix(each, "{") {
+ // check for regular expression in variable
+ colon := strings.Index(each, ":")
+ if colon != -1 {
+ // extract expression
+ paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
+ if paramExpr == "*" { // special case
+ buffer.WriteString("(.*)")
+ } else {
+ buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
+ }
+ } else {
+ // plain var
+ buffer.WriteString("([^/]+?)")
+ }
+ varCount += 1
+ } else {
+ literalCount += len(each)
+ encoded := each // TODO URI encode
+ buffer.WriteString(regexp.QuoteMeta(encoded))
+ }
+ }
+ return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
+}
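
To make the translation above concrete, here is a sketch of an in-package test (the helper is unexported, so such a test would live next to path_expression.go); the template and the expected expression and counts follow directly from the rules in templateToRegularExpression:

    package restful

    import "testing"

    func TestTemplateToRegularExpression(t *testing.T) {
        expr, literals, vars, _ := templateToRegularExpression("/users/{id:[0-9]+}/files/{path:*}")
        if expr != "^/users/([0-9]+)/files/(.*)(/.*)?$" {
            t.Errorf("unexpected expression: %s", expr)
        }
        if literals != 10 || vars != 2 {
            t.Errorf("unexpected counts: literals=%d vars=%d", literals, vars)
        }
    }
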
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/request.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/request.go
new file mode 100644
index 0000000..3e42346
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/request.go
@@ -0,0 +1,131 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io/ioutil"
+ "net/http"
+)
+
+var defaultRequestContentType string
+
+var doCacheReadEntityBytes = true
+
+// Request is a wrapper for a http Request that provides convenience methods
+type Request struct {
+ Request *http.Request
+ bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
+ pathParameters map[string]string
+ attributes map[string]interface{} // for storing request-scoped values
+ selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
+}
+
+func NewRequest(httpRequest *http.Request) *Request {
+ return &Request{
+ Request: httpRequest,
+ pathParameters: map[string]string{},
+ attributes: map[string]interface{}{},
+ } // empty parameters, attributes
+}
+
+// If ContentType is missing or */* is given then fall back to this type, otherwise
+// an "Unable to unmarshal content of type:" response is returned.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultRequestContentType(restful.MIME_JSON)
+func DefaultRequestContentType(mime string) {
+ defaultRequestContentType = mime
+}
+
+// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
+// Default is true (due to backward compatibility). For better performance, you should set it to false if you don't need it.
+func SetCacheReadEntity(doCache bool) {
+ doCacheReadEntityBytes = doCache
+}
+
+// PathParameter accesses the Path parameter value by its name
+func (r *Request) PathParameter(name string) string {
+ return r.pathParameters[name]
+}
+
+// PathParameters accesses the Path parameter values
+func (r *Request) PathParameters() map[string]string {
+ return r.pathParameters
+}
+
+// QueryParameter returns the (first) Query parameter value by its name
+func (r *Request) QueryParameter(name string) string {
+ return r.Request.FormValue(name)
+}
+
+// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value of the given name or an error.
+func (r *Request) BodyParameter(name string) (string, error) {
+ err := r.Request.ParseForm()
+ if err != nil {
+ return "", err
+ }
+ return r.Request.PostFormValue(name), nil
+}
+
+// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
+func (r *Request) HeaderParameter(name string) string {
+ return r.Request.Header.Get(name)
+}
+
+// ReadEntity checks the Accept header and reads the content into the entityPointer.
+func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
+ contentType := r.Request.Header.Get(HEADER_ContentType)
+ contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
+
+ // OLD feature, cache the body for reads
+ if doCacheReadEntityBytes {
+ if r.bodyContent == nil {
+ data, err := ioutil.ReadAll(r.Request.Body)
+ if err != nil {
+ return err
+ }
+ r.bodyContent = &data
+ }
+ r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
+ }
+
+ // check if the request body needs decompression
+ if ENCODING_GZIP == contentEncoding {
+ gzipReader := currentCompressorProvider.AcquireGzipReader()
+ defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
+ gzipReader.Reset(r.Request.Body)
+ r.Request.Body = gzipReader
+ } else if ENCODING_DEFLATE == contentEncoding {
+ zlibReader, err := zlib.NewReader(r.Request.Body)
+ if err != nil {
+ return err
+ }
+ r.Request.Body = zlibReader
+ }
+
+ // lookup the EntityReader
+ entityReader, ok := entityAccessRegistry.accessorAt(contentType)
+ if !ok {
+ return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+ }
+ return entityReader.Read(r, entityPointer)
+}
+
+// SetAttribute adds or replaces the attribute with the given value.
+func (r *Request) SetAttribute(name string, value interface{}) {
+ r.attributes[name] = value
+}
+
+// Attribute returns the value associated to the given name. Returns nil if absent.
+func (r Request) Attribute(name string) interface{} {
+ return r.attributes[name]
+}
+
+// SelectedRoutePath returns the root path + route path that matched the request, e.g. /meetings/{id}/attendees
+func (r Request) SelectedRoutePath() string {
+ return r.selectedRoutePath
+}
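
ReadEntity above selects the accessor by Content-Type and transparently handles gzip/deflate request bodies. A typical handler sketch; the User type and route wiring are placeholders and not part of the package:

    package example

    import (
        "net/http"

        "github.com/emicklei/go-restful"
    )

    // User is an illustrative payload type.
    type User struct {
        ID   string `json:"id" xml:"id"`
        Name string `json:"name" xml:"name"`
    }

    func createUser(req *restful.Request, resp *restful.Response) {
        usr := new(User)
        if err := req.ReadEntity(usr); err != nil {
            // 400: the body could not be unmarshalled for the given Content-Type
            resp.WriteError(http.StatusBadRequest, err)
            return
        }
        resp.WriteHeaderAndEntity(http.StatusCreated, usr)
    }
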
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/response.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/response.go
new file mode 100644
index 0000000..971cd0b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/response.go
@@ -0,0 +1,235 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "net/http"
+)
+
+// DEPRECATED, use DefaultResponseContentType(mime)
+var DefaultResponseMimeType string
+
+// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
+var PrettyPrintResponses = true
+
+// Response is a wrapper on the actual http ResponseWriter
+// It provides several convenience methods to prepare and write response content.
+type Response struct {
+ http.ResponseWriter
+ requestAccept string // mime-type the Http Request says it wants to receive
+ routeProduces []string // mime-types the Route says it can produce
+ statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
+ contentLength int // number of bytes written for the response body
+ prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
+ err error // err property is kept when WriteError is called
+}
+
+// NewResponse creates a new response based on a http ResponseWriter.
+func NewResponse(httpWriter http.ResponseWriter) *Response {
+ return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
+}
+
+// If Accept header matching fails, fall back to this type.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// restful.DefaultResponseContentType(restful.MIME_JSON)
+func DefaultResponseContentType(mime string) {
+ DefaultResponseMimeType = mime
+}
+
+// InternalServerError writes the StatusInternalServerError header.
+// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
+func (r Response) InternalServerError() Response {
+ r.WriteHeader(http.StatusInternalServerError)
+ return r
+}
+
+// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
+func (r *Response) PrettyPrint(bePretty bool) {
+ r.prettyPrint = bePretty
+}
+
+// AddHeader is a shortcut for .Header().Add(header,value)
+func (r Response) AddHeader(header string, value string) Response {
+ r.Header().Add(header, value)
+ return r
+}
+
+// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
+func (r *Response) SetRequestAccepts(mime string) {
+ r.requestAccept = mime
+}
+
+// EntityWriter returns the registered EntityWriter that the entity (requested resource)
+// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
+// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
+func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
+ sorted := sortedMimes(r.requestAccept)
+ for _, eachAccept := range sorted {
+ for _, eachProduce := range r.routeProduces {
+ if eachProduce == eachAccept.media {
+ if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
+ return w, true
+ }
+ }
+ }
+ if eachAccept.media == "*/*" {
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ }
+ }
+ // if requestAccept is empty
+ writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
+ if !ok {
+ // if not registered then fallback to the defaults (if set)
+ if DefaultResponseMimeType == MIME_JSON {
+ return entityAccessRegistry.accessorAt(MIME_JSON)
+ }
+ if DefaultResponseMimeType == MIME_XML {
+ return entityAccessRegistry.accessorAt(MIME_XML)
+ }
+ // Fallback to whatever the route says it can produce.
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ for _, each := range r.routeProduces {
+ if w, ok := entityAccessRegistry.accessorAt(each); ok {
+ return w, true
+ }
+ }
+ if trace {
+ traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
+ }
+ }
+ return writer, ok
+}
+
+// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
+func (r *Response) WriteEntity(value interface{}) error {
+ return r.WriteHeaderAndEntity(http.StatusOK, value)
+}
+
+// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
+// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
+// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
+// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
+// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
+// Current implementation ignores any q-parameters in the Accept Header.
+// Returns an error if the value could not be written on the response.
+func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
+ writer, ok := r.EntityWriter()
+ if !ok {
+ r.WriteHeader(http.StatusNotAcceptable)
+ return nil
+ }
+ return writer.Write(r, status, value)
+}
+
+// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsXml(value interface{}) error {
+ return writeXML(r, http.StatusOK, MIME_XML, value)
+}
+
+// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
+ return writeXML(r, status, MIME_XML, value)
+}
+
+// WriteAsJson is a convenience method for writing a value in json.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsJson(value interface{}) error {
+ return writeJSON(r, http.StatusOK, MIME_JSON, value)
+}
+
+// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteJson(value interface{}, contentType string) error {
+ return writeJSON(r, http.StatusOK, contentType, value)
+}
+
+// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
+ return writeJSON(r, status, contentType, value)
+}
+
+// WriteError writes the http status and the error string on the response.
+func (r *Response) WriteError(httpStatus int, err error) error {
+ r.err = err
+ return r.WriteErrorString(httpStatus, err.Error())
+}
+
+// WriteServiceError is a convenience method for responding with a status and a ServiceError
+func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
+ r.err = err
+ return r.WriteHeaderAndEntity(httpStatus, err)
+}
+
+// WriteErrorString is a convenience method for writing an error status together with the error reason
+func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
+ if r.err == nil {
+ // if not called from WriteError
+ r.err = errors.New(errorReason)
+ }
+ r.WriteHeader(httpStatus)
+ if _, err := r.Write([]byte(errorReason)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Flush implements http.Flusher interface, which sends any buffered data to the client.
+func (r *Response) Flush() {
+ if f, ok := r.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ } else if trace {
+ traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
+ }
+}
+
+// WriteHeader is overridden to remember the Status Code that has been written.
+// Changes to the Header of the response have no effect after this.
+func (r *Response) WriteHeader(httpStatus int) {
+ r.statusCode = httpStatus
+ r.ResponseWriter.WriteHeader(httpStatus)
+}
+
+// StatusCode returns the code that has been written using WriteHeader.
+func (r Response) StatusCode() int {
+ if 0 == r.statusCode {
+ // no status code has been written yet; assume OK
+ return http.StatusOK
+ }
+ return r.statusCode
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// Write is part of http.ResponseWriter interface.
+func (r *Response) Write(bytes []byte) (int, error) {
+ written, err := r.ResponseWriter.Write(bytes)
+ r.contentLength += written
+ return written, err
+}
+
+// ContentLength returns the number of bytes written for the response content.
+// Note that this value is only correct if all data is written through the Response using its Write* methods.
+// Data written directly using the underlying http.ResponseWriter is not accounted for.
+func (r Response) ContentLength() int {
+ return r.contentLength
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (r Response) CloseNotify() <-chan bool {
+ return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Error returns the err created by WriteError
+func (r Response) Error() error {
+ return r.err
+}
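
Pretty-printed JSON/XML output (the default, see PrettyPrintResponses) costs extra allocations. A small filter can switch it off for a whole container while leaving individual routes free to re-enable it per response; this is a sketch with illustrative names:

    package example

    import "github.com/emicklei/go-restful"

    // compactOutput turns off indentation for every response passing through the container.
    func compactOutput(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
        resp.PrettyPrint(false)
        chain.ProcessFilter(req, resp)
    }

    func install(c *restful.Container) {
        c.Filter(compactOutput)
    }
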
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/route.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/route.go
new file mode 100644
index 0000000..f54e862
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/route.go
@@ -0,0 +1,183 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "net/http"
+ "strings"
+)
+
+// RouteFunction declares the signature of a function that can be bound to a Route.
+type RouteFunction func(*Request, *Response)
+
+// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
+type Route struct {
+ Method string
+ Produces []string
+ Consumes []string
+ Path string // webservice root path + described path
+ Function RouteFunction
+ Filters []FilterFunction
+
+ // cached values for dispatching
+ relativePath string
+ pathParts []string
+ pathExpr *pathExpression // cached compilation of relativePath as RegExp
+
+ // documentation
+ Doc string
+ Notes string
+ Operation string
+ ParameterDocs []*Parameter
+ ResponseErrors map[int]ResponseError
+ ReadSample, WriteSample interface{} // structs that model an example request or response payload
+}
+
+// Initialize for Route
+func (r *Route) postBuild() {
+ r.pathParts = tokenizePath(r.Path)
+}
+
+// Create Request and Response from their http versions
+func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+ params := r.extractParameters(httpRequest.URL.Path)
+ wrappedRequest := NewRequest(httpRequest)
+ wrappedRequest.pathParameters = params
+ wrappedRequest.selectedRoutePath = r.Path
+ wrappedResponse := NewResponse(httpWriter)
+ wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+ wrappedResponse.routeProduces = r.Produces
+ return wrappedRequest, wrappedResponse
+}
+
+// dispatchWithFilters calls the function after passing through its own filters
+func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
+ if len(r.Filters) > 0 {
+ chain := FilterChain{Filters: r.Filters, Target: r.Function}
+ chain.ProcessFilter(wrappedRequest, wrappedResponse)
+ } else {
+ // unfiltered
+ r.Function(wrappedRequest, wrappedResponse)
+ }
+}
+
+// Return whether the mimeType matches what this Route can produce.
+func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
+ parts := strings.Split(mimeTypesWithQuality, ",")
+ for _, each := range parts {
+ var withoutQuality string
+ if strings.Contains(each, ";") {
+ withoutQuality = strings.Split(each, ";")[0]
+ } else {
+ withoutQuality = each
+ }
+ // trim before compare
+ withoutQuality = strings.Trim(withoutQuality, " ")
+ if withoutQuality == "*/*" {
+ return true
+ }
+ for _, producibleType := range r.Produces {
+ if producibleType == "*/*" || producibleType == withoutQuality {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+func (r Route) matchesContentType(mimeTypes string) bool {
+
+ if len(r.Consumes) == 0 {
+ // did not specify what it can consume ; any media type (“*/*”) is assumed
+ return true
+ }
+
+ if len(mimeTypes) == 0 {
+ // idempotent methods with (most likely or guaranteed) empty content match a missing Content-Type
+ m := r.Method
+ if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
+ return true
+ }
+ // proceed with default
+ mimeTypes = MIME_OCTET
+ }
+
+ parts := strings.Split(mimeTypes, ",")
+ for _, each := range parts {
+ var contentType string
+ if strings.Contains(each, ";") {
+ contentType = strings.Split(each, ";")[0]
+ } else {
+ contentType = each
+ }
+ // trim before compare
+ contentType = strings.Trim(contentType, " ")
+ for _, consumeableType := range r.Consumes {
+ if consumeableType == "*/*" || consumeableType == contentType {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Extract the parameters from the request url path
+func (r Route) extractParameters(urlPath string) map[string]string {
+ urlParts := tokenizePath(urlPath)
+ pathParameters := map[string]string{}
+ for i, key := range r.pathParts {
+ var value string
+ if i >= len(urlParts) {
+ value = ""
+ } else {
+ value = urlParts[i]
+ }
+ if strings.HasPrefix(key, "{") { // path-parameter
+ if colon := strings.Index(key, ":"); colon != -1 {
+ // extract by regex
+ regPart := key[colon+1 : len(key)-1]
+ keyPart := key[1:colon]
+ if regPart == "*" {
+ pathParameters[keyPart] = untokenizePath(i, urlParts)
+ break
+ } else {
+ pathParameters[keyPart] = value
+ }
+ } else {
+ // without enclosing {}
+ pathParameters[key[1:len(key)-1]] = value
+ }
+ }
+ }
+ return pathParameters
+}
+
+// Untokenize back into a URL path using the slash separator
+func untokenizePath(offset int, parts []string) string {
+ var buffer bytes.Buffer
+ for p := offset; p < len(parts); p++ {
+ buffer.WriteString(parts[p])
+ // do not end with a trailing slash
+ if p < len(parts)-1 {
+ buffer.WriteString("/")
+ }
+ }
+ return buffer.String()
+}
+
+// Tokenize a URL path using the slash separator ; the result does not have empty tokens
+func tokenizePath(path string) []string {
+ if "/" == path {
+ return []string{}
+ }
+ return strings.Split(strings.Trim(path, "/"), "/")
+}
+
+// for debugging
+func (r Route) String() string {
+ return r.Method + " " + r.Path
+}
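As a quick illustration of the tokenizePath/extractParameters logic above, here is a self-contained, stdlib-only sketch that mirrors the plain `{name}` and wildcard `{name:*}` cases (the `{name:regexp}` case is omitted); the template and URL are made up for the example.

    package main

    import (
        "fmt"
        "strings"
    )

    // tokenize mirrors tokenizePath above.
    func tokenize(path string) []string {
        if path == "/" {
            return []string{}
        }
        return strings.Split(strings.Trim(path, "/"), "/")
    }

    func main() {
        template := tokenize("/users/{id}/files/{rest:*}")
        actual := tokenize("/users/42/files/docs/readme.txt")

        params := map[string]string{}
        for i, key := range template {
            if !strings.HasPrefix(key, "{") || i >= len(actual) {
                continue // static segment (or nothing left to match)
            }
            if colon := strings.Index(key, ":"); colon != -1 && key[colon+1:len(key)-1] == "*" {
                // {name:*} swallows the remainder of the URL path
                params[key[1:colon]] = strings.Join(actual[i:], "/")
                break
            }
            params[key[1:len(key)-1]] = actual[i]
        }
        fmt.Println(params) // map[id:42 rest:docs/readme.txt]
    }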
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go
new file mode 100644
index 0000000..8bc1ab6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/route_builder.go
@@ -0,0 +1,240 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// RouteBuilder is a helper to construct Routes.
+type RouteBuilder struct {
+ rootPath string
+ currentPath string
+ produces []string
+ consumes []string
+ httpMethod string // required
+ function RouteFunction // required
+ filters []FilterFunction
+ // documentation
+ doc string
+ notes string
+ operation string
+ readSample, writeSample interface{}
+ parameters []*Parameter
+ errorMap map[int]ResponseError
+}
+
+// Do evaluates each argument with the RouteBuilder itself.
+// This allows you to follow DRY principles without breaking the fluent programming style.
+// Example:
+// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+// func Returns500(b *RouteBuilder) {
+// b.Returns(500, "Internal Server Error", restful.ServiceError{})
+// }
+func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
+ for _, each := range oneArgBlocks {
+ each(b)
+ }
+ return b
+}
+
+// To binds the route to a function.
+// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
+func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
+ b.function = function
+ return b
+}
+
+// Method specifies what HTTP method to match. Required.
+func (b *RouteBuilder) Method(method string) *RouteBuilder {
+ b.httpMethod = method
+ return b
+}
+
+// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
+func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
+ b.produces = mimeTypes
+ return b
+}
+
+// Consumes specifies what MIME types can be consumed ; the request Content-Type Http header must match one of these
+func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
+ b.consumes = mimeTypes
+ return b
+}
+
+// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
+func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
+ b.currentPath = subPath
+ return b
+}
+
+// Doc tells what this route is all about. Optional.
+func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
+ b.doc = documentation
+ return b
+}
+
+// Notes is a verbose explanation of the operation behavior. Optional.
+func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
+ b.notes = notes
+ return b
+}
+
+// Reads tells what resource type will be read from the request payload. Optional.
+// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
+func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
+ b.readSample = sample
+ typeAsName := reflect.TypeOf(sample).String()
+ bodyParameter := &Parameter{&ParameterData{Name: "body"}}
+ bodyParameter.beBody()
+ bodyParameter.Required(true)
+ bodyParameter.DataType(typeAsName)
+ b.Param(bodyParameter)
+ return b
+}
+
+// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if no such Parameter exists.
+// Use this to modify or extend information for the Parameter (through its Data()).
+func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
+ for _, each := range b.parameters {
+ if each.Data().Name == name {
+ return each
+ }
+ }
+ return p
+}
+
+// Writes tells what resource type will be written as the response payload. Optional.
+func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
+ b.writeSample = sample
+ return b
+}
+
+// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
+func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
+ if b.parameters == nil {
+ b.parameters = []*Parameter{}
+ }
+ b.parameters = append(b.parameters, parameter)
+ return b
+}
+
+// Operation allows you to document what the actual method/function call is of the Route.
+// Unless called, the operation name is derived from the RouteFunction set using To(..).
+func (b *RouteBuilder) Operation(name string) *RouteBuilder {
+ b.operation = name
+ return b
+}
+
+// ReturnsError is deprecated, use Returns instead.
+func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
+ log.Print("ReturnsError is deprecated, use Returns instead.")
+ return b.Returns(code, message, model)
+}
+
+// Returns allows you to document what responses (errors or regular) can be expected.
+// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
+func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
+ err := ResponseError{
+ Code: code,
+ Message: message,
+ Model: model,
+ }
+ // lazy init because there is no NewRouteBuilder (yet)
+ if b.errorMap == nil {
+ b.errorMap = map[int]ResponseError{}
+ }
+ b.errorMap[code] = err
+ return b
+}
+
+type ResponseError struct {
+ Code int
+ Message string
+ Model interface{}
+}
+
+func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
+ b.rootPath = path
+ return b
+}
+
+// Filter appends a FilterFunction to the end of filters for this Route to build.
+func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
+ b.filters = append(b.filters, filter)
+ return b
+}
+
+// copyDefaults sets the Produces and Consumes of the builder to those of the
+// root WebService (rootProduces, rootConsumes) when they have not been specified on the Route itself.
+func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
+ if len(b.produces) == 0 {
+ b.produces = rootProduces
+ }
+ if len(b.consumes) == 0 {
+ b.consumes = rootConsumes
+ }
+}
+
+// Build creates a new Route using the specification details collected by the RouteBuilder
+func (b *RouteBuilder) Build() Route {
+ pathExpr, err := newPathExpression(b.currentPath)
+ if err != nil {
+ log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
+ os.Exit(1)
+ }
+ if b.function == nil {
+ log.Printf("[restful] No function specified for route:" + b.currentPath)
+ os.Exit(1)
+ }
+ operationName := b.operation
+ if len(operationName) == 0 && b.function != nil {
+ // extract from definition
+ operationName = nameOfFunction(b.function)
+ }
+ route := Route{
+ Method: b.httpMethod,
+ Path: concatPath(b.rootPath, b.currentPath),
+ Produces: b.produces,
+ Consumes: b.consumes,
+ Function: b.function,
+ Filters: b.filters,
+ relativePath: b.currentPath,
+ pathExpr: pathExpr,
+ Doc: b.doc,
+ Notes: b.notes,
+ Operation: operationName,
+ ParameterDocs: b.parameters,
+ ResponseErrors: b.errorMap,
+ ReadSample: b.readSample,
+ WriteSample: b.writeSample}
+ route.postBuild()
+ return route
+}
+
+func concatPath(path1, path2 string) string {
+ return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+}
+
+// nameOfFunction returns the short name of the function f for documentation.
+// It uses a runtime feature for debugging ; its value may change for later Go versions.
+func nameOfFunction(f interface{}) string {
+ fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
+ tokenized := strings.Split(fun.Name(), ".")
+ last := tokenized[len(tokenized)-1]
+ last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, ")-fm") // Go 1.5
+ last = strings.TrimSuffix(last, "·fm") // < Go 1.5
+ last = strings.TrimSuffix(last, "-fm") // Go 1.5
+ return last
+}
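For context, a small sketch of the fluent style this builder enables. It only chains methods defined above (To, Doc, Operation, Writes, Returns) on a route created through a WebService; PathParameter on the Request and serving DefaultContainer via net/http are assumptions not shown in this diff, and the User type and port are illustrative.

    package main

    import (
        "log"
        "net/http"

        "github.com/emicklei/go-restful"
    )

    type User struct {
        ID   string `json:"id"`
        Name string `json:"name"`
    }

    func findUser(req *restful.Request, resp *restful.Response) {
        // illustrative handler; a real one would look the user up somewhere
        resp.WriteAsJson(User{ID: req.PathParameter("id"), Name: "example"})
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").Produces(restful.MIME_JSON)

        ws.Route(ws.GET("/{id}").
            To(findUser).
            Doc("look up a user by its id").
            Operation("findUser").
            Writes(User{}).
            Returns(http.StatusOK, "OK", User{}).
            Returns(http.StatusNotFound, "user not found", nil))

        restful.DefaultContainer.Add(ws)
        log.Fatal(http.ListenAndServe(":8080", restful.DefaultContainer))
    }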
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/router.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/router.go
new file mode 100644
index 0000000..9b32fb6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/router.go
@@ -0,0 +1,18 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "net/http"
+
+// A RouteSelector finds the best matching Route given the input HTTP Request
+type RouteSelector interface {
+
+ // SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
+ // It returns a selected Route and its containing WebService or an error indicating
+ // a problem.
+ SelectRoute(
+ webServices []*WebService,
+ httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
+}
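To make the contract concrete, here is a deliberately naive RouteSelector sketch. It only consults WebService methods that appear elsewhere in this vendored code (RootPath, Routes); a real selector also matches the HTTP method, parameters and MIME types, and how a custom selector is installed on a Container is outside this diff. The package name is hypothetical.

    package routing

    import (
        "errors"
        "net/http"
        "strings"

        "github.com/emicklei/go-restful"
    )

    // firstRouteSelector picks the first Route of the first WebService whose
    // root path prefixes the request path. For illustration only.
    type firstRouteSelector struct{}

    func (s firstRouteSelector) SelectRoute(
        webServices []*restful.WebService,
        httpRequest *http.Request) (*restful.WebService, *restful.Route, error) {
        for _, ws := range webServices {
            if strings.HasPrefix(httpRequest.URL.Path, ws.RootPath()) && len(ws.Routes()) > 0 {
                route := ws.Routes()[0]
                return ws, &route, nil
            }
        }
        return nil, nil, errors.New("no route found")
    }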
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go
new file mode 100644
index 0000000..62d1108
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/service_error.go
@@ -0,0 +1,23 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "fmt"
+
+// ServiceError is a transport object to pass information about a non-Http error that occurred in a WebService while processing a request.
+type ServiceError struct {
+ Code int
+ Message string
+}
+
+// NewError returns a ServiceError using the code and reason
+func NewError(code int, message string) ServiceError {
+ return ServiceError{Code: code, Message: message}
+}
+
+// Error returns a text representation of the service error
+func (s ServiceError) Error() string {
+ return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
+}
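A minimal sketch of surfacing a ServiceError to the client from a handler. NewError and Error are defined above; WriteErrorString is the Response helper used elsewhere in this vendored code, and the package name, handler and "widget" wording are illustrative.

    package handlers

    import (
        "net/http"

        "github.com/emicklei/go-restful"
    )

    // widgetNotFound reports a missing resource using a ServiceError.
    func widgetNotFound(req *restful.Request, resp *restful.Response) {
        svcErr := restful.NewError(http.StatusNotFound, "widget not found")
        // svcErr.Error() renders as "[ServiceError:404] widget not found"
        resp.WriteErrorString(svcErr.Code, svcErr.Error())
    }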
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md
new file mode 100644
index 0000000..736f6f3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md
@@ -0,0 +1,43 @@
+Change history of swagger
+=
+2015-10-16
+- add type override mechanism for swagger models (MR 254, nathanejohnson)
+- replace uses of wildcard in generated apidocs (issue 251)
+
+2015-05-25
+- (api break) changed the type of Properties in Model
+- (api break) changed the type of Models in ApiDeclaration
+- (api break) changed the parameter type of PostBuildDeclarationMapFunc
+
+2015-04-09
+- add ModelBuildable interface for customization of Model
+
+2015-03-17
+- preserve order of Routes per WebService in Swagger listing
+- fix use of $ref and type in Swagger models
+- add api version to listing
+
+2014-11-14
+- operation parameters are now sorted using ordering path,query,form,header,body
+
+2014-11-12
+- respect omitempty tag value for embedded structs
+- expose ApiVersion of WebService to Swagger ApiDeclaration
+
+2014-05-29
+- (api add) Ability to define custom http.Handler to serve swagger-ui static files
+
+2014-05-04
+- (fix) include model for array element type of response
+
+2014-01-03
+- (fix) do not add primitive type to the Api models
+
+2013-11-27
+- (fix) make Swagger work for WebServices with root ("/" or "") paths
+
+2013-10-29
+- (api add) package variable LogInfo to customize logging function
+
+2013-10-15
+- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition) \ No newline at end of file
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md
new file mode 100644
index 0000000..6c27c30
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/README.md
@@ -0,0 +1,76 @@
+How to use Swagger UI with go-restful
+=
+
+Get the Swagger UI sources (version 1.2 only)
+
+ git clone https://github.com/wordnik/swagger-ui.git
+
+The project contains a "dist" folder.
+Its contents include all the Swagger UI files you need.
+
+The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
+You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`
+
+Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
+
+ config := swagger.Config{
+ WebServices: restful.RegisteredWebServices(),
+ ApiPath: "/apidocs.json",
+ SwaggerPath: "/apidocs/",
+ SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
+ swagger.InstallSwaggerService(config)
+
+
+Documenting Structs
+--
+
+Currently there are 2 ways to document your structs in the go-restful Swagger.
+
+###### By using struct tags
+- Use tag "description" to annotate a struct field with a description to show in the UI
+- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.
+
+###### By using the SwaggerDoc method
+Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
+
+ type Address struct {
+ Country string `json:"country,omitempty"`
+ PostCode int `json:"postcode,omitempty"`
+ }
+
+ func (Address) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "Address doc",
+ "country": "Country doc",
+ "postcode": "PostCode doc",
+ }
+ }
+
+This example will generate JSON like this:
+
+ {
+ "Address": {
+ "id": "Address",
+ "description": "Address doc",
+ "properties": {
+ "country": {
+ "type": "string",
+ "description": "Country doc"
+ },
+ "postcode": {
+ "type": "integer",
+ "format": "int32",
+ "description": "PostCode doc"
+ }
+ }
+ }
+ }
+
+**Very Important Notes:**
+- `SwaggerDoc()` must use a **non-pointer** receiver (e.g. `func (Address)` and not `func (*Address)`)
+- The returned map must be keyed by the JSON name of the field (e.g. `"postcode"` and not `"PostCode"`)
+
+Notes
+--
+- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
+- The WebServices field of swagger.Config can be used to control which services you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go
new file mode 100644
index 0000000..9f4c369
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go
@@ -0,0 +1,64 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// ApiDeclarationList maintains an ordered list of ApiDeclaration.
+type ApiDeclarationList struct {
+ List []ApiDeclaration
+}
+
+// At returns the ApiDeclaration with the given path ; ok is false if it is absent
+func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
+ for _, each := range l.List {
+ if each.ResourcePath == path {
+ return each, true
+ }
+ }
+ return a, false
+}
+
+// Put adds or replaces an ApiDeclaration for the given path
+func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
+ // maybe replace existing
+ for i, each := range l.List {
+ if each.ResourcePath == path {
+ // replace
+ l.List[i] = a
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, a)
+}
+
+// Do enumerates all the declarations, each with its resource path
+func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
+ for _, each := range l.List {
+ block(each.ResourcePath, each)
+ }
+}
+
+// MarshalJSON writes the ApiDeclarationList as if it were a map[string]ApiDeclaration
+func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.ResourcePath)
+ buf.WriteString("\": ")
+ encoder.Encode(each)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
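A short sketch of the list-as-ordered-map behaviour above: Put replaces in place by ResourcePath, and MarshalJSON emits a JSON object keyed by path in insertion order. The import path assumes the package is used as github.com/emicklei/go-restful/swagger; the paths and versions are made up.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/emicklei/go-restful/swagger"
    )

    func main() {
        list := swagger.ApiDeclarationList{}
        list.Put("/users", swagger.ApiDeclaration{ResourcePath: "/users", ApiVersion: "1.0"})
        list.Put("/orders", swagger.ApiDeclaration{ResourcePath: "/orders", ApiVersion: "1.0"})
        // replaces the earlier "/users" entry but keeps its position
        list.Put("/users", swagger.ApiDeclaration{ResourcePath: "/users", ApiVersion: "1.1"})

        data, err := json.Marshal(list)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // a JSON object keyed by resource path
    }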
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go
new file mode 100644
index 0000000..510d6fc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/config.go
@@ -0,0 +1,38 @@
+package swagger
+
+import (
+ "net/http"
+
+ "github.com/emicklei/go-restful"
+)
+
+// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
+type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
+
+type MapSchemaFormatFunc func(typeName string) string
+
+type Config struct {
+ // url where the services are available, e.g. http://localhost:8080
+ // if left empty then the basePath of Swagger is taken from the actual request
+ WebServicesUrl string
+ // path where the JSON api is available, e.g. /apidocs
+ ApiPath string
+ // [optional] path where the swagger UI will be served, e.g. /swagger
+ SwaggerPath string
+ // [optional] location of folder containing Swagger HTML5 application index.html
+ SwaggerFilePath string
+ // api listing is constructed from this list of restful WebServices.
+ WebServices []*restful.WebService
+ // will serve all static content (scripts,pages,images)
+ StaticHandler http.Handler
+ // [optional] by default CORS (Cross-Origin Resource Sharing) is enabled.
+ DisableCORS bool
+ // Top-level API version. Is reflected in the resource listing.
+ ApiVersion string
+ // If set then call this handler after building the complete ApiDeclaration Map
+ PostBuildHandler PostBuildDeclarationMapFunc
+ // Swagger global info struct
+ Info Info
+ // [optional] If set, the model builder calls this handler to get additional typename-to-swagger-format conversions.
+ SchemaFormatHandler MapSchemaFormatFunc
+}
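The README above already shows the basic Config; the sketch below additionally wires the two hook fields defined here. PostBuildHandler receives the finished ApiDeclarationList, and SchemaFormatHandler maps extra type names to swagger formats. The custom type name and format value are hypothetical.

    package main

    import (
        "github.com/emicklei/go-restful"
        "github.com/emicklei/go-restful/swagger"
    )

    func main() {
        config := swagger.Config{
            WebServices: restful.RegisteredWebServices(),
            ApiPath:     "/apidocs.json",
            ApiVersion:  "1.0.0",
            Info: swagger.Info{
                Title:       "Example API",
                Description: "illustrative values only",
            },
            // called once, after the complete ApiDeclaration map has been built
            PostBuildHandler: func(decls *swagger.ApiDeclarationList) {
                decls.Do(func(path string, decl swagger.ApiDeclaration) {
                    // inspect or adjust each declaration here
                })
            },
            // map additional type names to a swagger "format" value
            SchemaFormatHandler: func(typeName string) string {
                if typeName == "mypkg.UUID" { // hypothetical custom type
                    return "uuid"
                }
                return "" // fall back to the built-in mapping
            },
        }
        swagger.RegisterSwaggerService(config, restful.DefaultContainer)
    }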
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go
new file mode 100644
index 0000000..696d192
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_builder.go
@@ -0,0 +1,436 @@
+package swagger
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+)
+
+// ModelBuildable is used for extending Structs that need more control over
+// how the Model appears in the Swagger api declaration.
+type ModelBuildable interface {
+ PostBuildModel(m *Model) *Model
+}
+
+type modelBuilder struct {
+ Models *ModelList
+ Config *Config
+}
+
+type documentable interface {
+ SwaggerDoc() map[string]string
+}
+
+// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
+// If it exists, retrieve the documentation and overwrite all struct tag descriptions
+func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
+ if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
+ return docable.SwaggerDoc()
+ }
+ return make(map[string]string)
+}
+
+// addModelFrom creates and adds a Model to the builder and detects and calls
+// the post build hook for customizations
+func (b modelBuilder) addModelFrom(sample interface{}) {
+ if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
+ // allow customizations
+ if buildable, ok := sample.(ModelBuildable); ok {
+ modelOrNil = buildable.PostBuildModel(modelOrNil)
+ b.Models.Put(modelOrNil.Id, *modelOrNil)
+ }
+ }
+}
+
+func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
+ modelName := b.keyFrom(st)
+ if nameOverride != "" {
+ modelName = nameOverride
+ }
+ // no models needed for primitive types
+ if b.isPrimitiveType(modelName) {
+ return nil
+ }
+ // see if we already have visited this model
+ if _, ok := b.Models.At(modelName); ok {
+ return nil
+ }
+ sm := Model{
+ Id: modelName,
+ Required: []string{},
+ Properties: ModelPropertyList{}}
+
+ // reference the model before further initializing (enables recursive structs)
+ b.Models.Put(modelName, sm)
+
+ // check for slice or array
+ if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
+ b.addModel(st.Elem(), "")
+ return &sm
+ }
+ // check for structure or primitive type
+ if st.Kind() != reflect.Struct {
+ return &sm
+ }
+
+ fullDoc := getDocFromMethodSwaggerDoc2(st)
+ modelDescriptions := []string{}
+
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
+ if len(modelDescription) > 0 {
+ modelDescriptions = append(modelDescriptions, modelDescription)
+ }
+
+ // add if not omitted
+ if len(jsonName) != 0 {
+ // update description
+ if fieldDoc, ok := fullDoc[jsonName]; ok {
+ prop.Description = fieldDoc
+ }
+ // update Required
+ if b.isPropertyRequired(field) {
+ sm.Required = append(sm.Required, jsonName)
+ }
+ sm.Properties.Put(jsonName, prop)
+ }
+ }
+
+ // We always overwrite documentation if SwaggerDoc method exists
+ // "" is special for documenting the struct itself
+ if modelDoc, ok := fullDoc[""]; ok {
+ sm.Description = modelDoc
+ } else if len(modelDescriptions) != 0 {
+ sm.Description = strings.Join(modelDescriptions, "\n")
+ }
+
+ // update model builder with completed model
+ b.Models.Put(modelName, sm)
+
+ return &sm
+}
+
+func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
+ required := true
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if len(s) > 1 && s[1] == "omitempty" {
+ return false
+ }
+ }
+ return required
+}
+
+func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
+ jsonName = b.jsonNameOfField(field)
+ if len(jsonName) == 0 {
+ // empty name signals skip property
+ return "", "", prop
+ }
+
+ if tag := field.Tag.Get("modelDescription"); tag != "" {
+ modelDescription = tag
+ }
+
+ prop.setPropertyMetadata(field)
+ if prop.Type != nil {
+ return jsonName, modelDescription, prop
+ }
+ fieldType := field.Type
+
+ // check if type is doing its own marshalling
+ marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+ if fieldType.Implements(marshalerType) {
+ var pType = "string"
+ if prop.Type == nil {
+ prop.Type = &pType
+ }
+ if prop.Format == "" {
+ prop.Format = b.jsonSchemaFormat(fieldType.String())
+ }
+ return jsonName, modelDescription, prop
+ }
+
+ // check if annotation says it is a string
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if len(s) > 1 && s[1] == "string" {
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, modelDescription, prop
+ }
+ }
+
+ fieldKind := fieldType.Kind()
+ switch {
+ case fieldKind == reflect.Struct:
+ jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Slice || fieldKind == reflect.Array:
+ jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Ptr:
+ jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.String:
+ stringt := "string"
+ prop.Type = &stringt
+ return jsonName, modelDescription, prop
+ case fieldKind == reflect.Map:
+ // if it's a map, it's unstructured, and swagger 1.2 can't handle it
+ objectType := "object"
+ prop.Type = &objectType
+ return jsonName, modelDescription, prop
+ }
+
+ if b.isPrimitiveType(fieldType.String()) {
+ mapped := b.jsonSchemaType(fieldType.String())
+ prop.Type = &mapped
+ prop.Format = b.jsonSchemaFormat(fieldType.String())
+ return jsonName, modelDescription, prop
+ }
+ modelType := fieldType.String()
+ prop.Ref = &modelType
+
+ if fieldType.Name() == "" { // override type of anonymous structs
+ nestedTypeName := modelName + "." + jsonName
+ prop.Ref = &nestedTypeName
+ b.addModel(fieldType, nestedTypeName)
+ }
+ return jsonName, modelDescription, prop
+}
+
+func hasNamedJSONTag(field reflect.StructField) bool {
+ parts := strings.Split(field.Tag.Get("json"), ",")
+ if len(parts) == 0 {
+ return false
+ }
+ for _, s := range parts[1:] {
+ if s == "inline" {
+ return false
+ }
+ }
+ return len(parts[0]) > 0
+}
+
+func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
+ prop.setPropertyMetadata(field)
+ // Check for type override in tag
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+ // check for anonymous
+ if len(fieldType.Name()) == 0 {
+ // anonymous
+ anonType := model.Id + "." + jsonName
+ b.addModel(fieldType, anonType)
+ prop.Ref = &anonType
+ return jsonName, prop
+ }
+
+ if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
+ // embedded struct
+ sub := modelBuilder{new(ModelList), b.Config}
+ sub.addModel(fieldType, "")
+ subKey := sub.keyFrom(fieldType)
+ // merge properties from sub
+ subModel, _ := sub.Models.At(subKey)
+ subModel.Properties.Do(func(k string, v ModelProperty) {
+ model.Properties.Put(k, v)
+ // if subModel says this property is required then include it
+ required := false
+ for _, each := range subModel.Required {
+ if k == each {
+ required = true
+ break
+ }
+ }
+ if required {
+ model.Required = append(model.Required, k)
+ }
+ })
+ // add all new referenced models
+ sub.Models.Do(func(key string, sub Model) {
+ if key != subKey {
+ if _, ok := b.Models.At(key); !ok {
+ b.Models.Put(key, sub)
+ }
+ }
+ })
+ // empty name signals skip property
+ return "", prop
+ }
+ // simple struct
+ b.addModel(fieldType, "")
+ var pType = fieldType.String()
+ prop.Ref = &pType
+ return jsonName, prop
+}
+
+func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
+ // check for type override in tags
+ prop.setPropertyMetadata(field)
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+ var pType = "array"
+ prop.Type = &pType
+ isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
+ elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
+ prop.Items = new(Item)
+ if isPrimitive {
+ mapped := b.jsonSchemaType(elemTypeName)
+ prop.Items.Type = &mapped
+ } else {
+ prop.Items.Ref = &elemTypeName
+ }
+ // add|overwrite model for element type
+ if fieldType.Elem().Kind() == reflect.Ptr {
+ fieldType = fieldType.Elem()
+ }
+ if !isPrimitive {
+ b.addModel(fieldType.Elem(), elemTypeName)
+ }
+ return jsonName, prop
+}
+
+func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
+ prop.setPropertyMetadata(field)
+ // Check for type override in tags
+ if prop.Type != nil {
+ return jsonName, prop
+ }
+ fieldType := field.Type
+
+ // override type of pointer to list-likes
+ if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
+ var pType = "array"
+ prop.Type = &pType
+ isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
+ elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
+ if isPrimitive {
+ primName := b.jsonSchemaType(elemName)
+ prop.Items = &Item{Ref: &primName}
+ } else {
+ prop.Items = &Item{Ref: &elemName}
+ }
+ if !isPrimitive {
+ // add|overwrite model for element type
+ b.addModel(fieldType.Elem().Elem(), elemName)
+ }
+ } else {
+ // non-array, pointer type
+ var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path
+ if b.isPrimitiveType(fieldType.String()[1:]) {
+ prop.Type = &pType
+ prop.Format = b.jsonSchemaFormat(fieldType.String()[1:])
+ return jsonName, prop
+ }
+ prop.Ref = &pType
+ elemName := ""
+ if fieldType.Elem().Name() == "" {
+ elemName = modelName + "." + jsonName
+ prop.Ref = &elemName
+ }
+ b.addModel(fieldType.Elem(), elemName)
+ }
+ return jsonName, prop
+}
+
+func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
+ if t.Kind() == reflect.Ptr {
+ return t.String()[1:]
+ }
+ if t.Name() == "" {
+ return modelName + "." + jsonName
+ }
+ return b.keyFrom(t)
+}
+
+func (b modelBuilder) keyFrom(st reflect.Type) string {
+ key := st.String()
+ if len(st.Name()) == 0 { // unnamed type
+ // Swagger UI has special meaning for [
+ key = strings.Replace(key, "[]", "||", -1)
+ }
+ return key
+}
+
+// see also https://golang.org/ref/spec#Numeric_types
+func (b modelBuilder) isPrimitiveType(modelName string) bool {
+ if len(modelName) == 0 {
+ return false
+ }
+ return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
+}
+
+// jsonNameOfField returns the name of the field as it should appear in JSON format
+// An empty string indicates that this field is not part of the JSON representation
+func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
+ if jsonTag := field.Tag.Get("json"); jsonTag != "" {
+ s := strings.Split(jsonTag, ",")
+ if s[0] == "-" {
+ // empty name signals skip property
+ return ""
+ } else if s[0] != "" {
+ return s[0]
+ }
+ }
+ return field.Name
+}
+
+// see also http://json-schema.org/latest/json-schema-core.html#anchor8
+func (b modelBuilder) jsonSchemaType(modelName string) string {
+ schemaMap := map[string]string{
+ "uint": "integer",
+ "uint8": "integer",
+ "uint16": "integer",
+ "uint32": "integer",
+ "uint64": "integer",
+
+ "int": "integer",
+ "int8": "integer",
+ "int16": "integer",
+ "int32": "integer",
+ "int64": "integer",
+
+ "byte": "integer",
+ "float64": "number",
+ "float32": "number",
+ "bool": "boolean",
+ "time.Time": "string",
+ }
+ mapped, ok := schemaMap[modelName]
+ if !ok {
+ return modelName // use as is (custom or struct)
+ }
+ return mapped
+}
+
+func (b modelBuilder) jsonSchemaFormat(modelName string) string {
+ if b.Config != nil && b.Config.SchemaFormatHandler != nil {
+ if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
+ return mapped
+ }
+ }
+ schemaMap := map[string]string{
+ "int": "int32",
+ "int32": "int32",
+ "int64": "int64",
+ "byte": "byte",
+ "uint": "integer",
+ "uint8": "byte",
+ "float64": "double",
+ "float32": "float",
+ "time.Time": "date-time",
+ "*time.Time": "date-time",
+ }
+ mapped, ok := schemaMap[modelName]
+ if !ok {
+ return "" // no format
+ }
+ return mapped
+}
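To summarize what the builder above does with ordinary struct fields, here is a hypothetical struct together with the Swagger 1.2 property each field would map to (via jsonNameOfField, isPropertyRequired, jsonSchemaType and jsonSchemaFormat); the comments describe the expected output, they are not extra API.

    package models

    import "time"

    type Widget struct {
        ID        int64     `json:"id"`             // type "integer", format "int64"; listed in "required"
        Ratio     float32   `json:"ratio"`          // type "number", format "float"
        Name      string    `json:"name,omitempty"` // type "string"; omitempty => not in "required"
        CreatedAt time.Time `json:"createdAt"`      // implements json.Marshaler => type "string", format "date-time"
        Tags      []string  `json:"tags"`           // type "array" with "string" items
        Secret    string    `json:"-"`              // "-" => skipped entirely
    }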
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go
new file mode 100644
index 0000000..9bb6cb6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_list.go
@@ -0,0 +1,86 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// NamedModel associates a name with a Model (not using its Id)
+type NamedModel struct {
+ Name string
+ Model Model
+}
+
+// ModelList encapsulates a list of NamedModel (association)
+type ModelList struct {
+ List []NamedModel
+}
+
+// Put adds or replaces a Model by its name
+func (l *ModelList) Put(name string, model Model) {
+ for i, each := range l.List {
+ if each.Name == name {
+ // replace
+ l.List[i] = NamedModel{name, model}
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, NamedModel{name, model})
+}
+
+// At returns a Model by its name, ok is false if absent
+func (l *ModelList) At(name string) (m Model, ok bool) {
+ for _, each := range l.List {
+ if each.Name == name {
+ return each.Model, true
+ }
+ }
+ return m, false
+}
+
+// Do enumerates all the models, each with its assigned name
+func (l *ModelList) Do(block func(name string, value Model)) {
+ for _, each := range l.List {
+ block(each.Name, each.Model)
+ }
+}
+
+// MarshalJSON writes the ModelList as if it was a map[string]Model
+func (l ModelList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.Name)
+ buf.WriteString("\": ")
+ encoder.Encode(each.Model)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
+
+// UnmarshalJSON reads back a ModelList. This is an expensive operation.
+func (l *ModelList) UnmarshalJSON(data []byte) error {
+ raw := map[string]interface{}{}
+ json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
+ for k, v := range raw {
+ // produces JSON bytes for each value
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ var m Model
+ json.NewDecoder(bytes.NewReader(data)).Decode(&m)
+ l.Put(k, m)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
new file mode 100644
index 0000000..04fff2c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
@@ -0,0 +1,66 @@
+package swagger
+
+import (
+ "reflect"
+ "strings"
+)
+
+func (prop *ModelProperty) setDescription(field reflect.StructField) {
+ if tag := field.Tag.Get("description"); tag != "" {
+ prop.Description = tag
+ }
+}
+
+func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
+ if tag := field.Tag.Get("default"); tag != "" {
+ prop.DefaultValue = Special(tag)
+ }
+}
+
+func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
+ // We use | to separate the enum values. This separator is chosen
+ // because it is unlikely to appear in actual enumeration values.
+ if tag := field.Tag.Get("enum"); tag != "" {
+ prop.Enum = strings.Split(tag, "|")
+ }
+}
+
+func (prop *ModelProperty) setMaximum(field reflect.StructField) {
+ if tag := field.Tag.Get("maximum"); tag != "" {
+ prop.Maximum = tag
+ }
+}
+
+func (prop *ModelProperty) setType(field reflect.StructField) {
+ if tag := field.Tag.Get("type"); tag != "" {
+ prop.Type = &tag
+ }
+}
+
+func (prop *ModelProperty) setMinimum(field reflect.StructField) {
+ if tag := field.Tag.Get("minimum"); tag != "" {
+ prop.Minimum = tag
+ }
+}
+
+func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
+ tag := field.Tag.Get("unique")
+ switch tag {
+ case "true":
+ v := true
+ prop.UniqueItems = &v
+ case "false":
+ v := false
+ prop.UniqueItems = &v
+ }
+}
+
+func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
+ prop.setDescription(field)
+ prop.setEnumValues(field)
+ prop.setMinimum(field)
+ prop.setMaximum(field)
+ prop.setUniqueItems(field)
+ prop.setDefaultValue(field)
+ prop.setType(field)
+}
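A hypothetical struct exercising the tags consumed by setPropertyMetadata above; the field names and values are made up.

    package models

    type Gadget struct {
        State    string   `json:"state" description:"current lifecycle state" enum:"new|active|retired" default:"new"`
        Priority int      `json:"priority" minimum:"1" maximum:"10"`
        Labels   []string `json:"labels" unique:"true"`
        Payload  string   `json:"payload" type:"string"` // explicit type override
    }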
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go
new file mode 100644
index 0000000..3babb19
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go
@@ -0,0 +1,87 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// NamedModelProperty associates a name to a ModelProperty
+type NamedModelProperty struct {
+ Name string
+ Property ModelProperty
+}
+
+// ModelPropertyList encapsulates a list of NamedModelProperty (association)
+type ModelPropertyList struct {
+ List []NamedModelProperty
+}
+
+// At returns the ModelProperty with the given name ; ok is false if it is absent
+func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
+ for _, each := range l.List {
+ if each.Name == name {
+ return each.Property, true
+ }
+ }
+ return p, false
+}
+
+// Put adds or replaces a ModelProperty with this name
+func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
+ // maybe replace existing
+ for i, each := range l.List {
+ if each.Name == name {
+ // replace
+ l.List[i] = NamedModelProperty{Name: name, Property: prop}
+ return
+ }
+ }
+ // add
+ l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
+}
+
+// Do enumerates all the properties, each with its assigned name
+func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
+ for _, each := range l.List {
+ block(each.Name, each.Property)
+ }
+}
+
+// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
+func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ buf.WriteString("{\n")
+ for i, each := range l.List {
+ buf.WriteString("\"")
+ buf.WriteString(each.Name)
+ buf.WriteString("\": ")
+ encoder.Encode(each.Property)
+ if i < len(l.List)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+ buf.WriteString("}")
+ return buf.Bytes(), nil
+}
+
+// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
+func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
+ raw := map[string]interface{}{}
+ json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
+ for k, v := range raw {
+ // produces JSON bytes for each value
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ var m ModelProperty
+ json.NewDecoder(bytes.NewReader(data)).Decode(&m)
+ l.Put(k, m)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go
new file mode 100644
index 0000000..b33ccfb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go
@@ -0,0 +1,36 @@
+package swagger
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "github.com/emicklei/go-restful"
+
+type orderedRouteMap struct {
+ elements map[string][]restful.Route
+ keys []string
+}
+
+func newOrderedRouteMap() *orderedRouteMap {
+ return &orderedRouteMap{
+ elements: map[string][]restful.Route{},
+ keys: []string{},
+ }
+}
+
+func (o *orderedRouteMap) Add(key string, route restful.Route) {
+ routes, ok := o.elements[key]
+ if ok {
+ routes = append(routes, route)
+ o.elements[key] = routes
+ return
+ }
+ o.elements[key] = []restful.Route{route}
+ o.keys = append(o.keys, key)
+}
+
+func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
+ for _, k := range o.keys {
+ block(k, o.elements[k])
+ }
+}
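Because orderedRouteMap is unexported, the sketch below only compiles inside this swagger package (for instance in a test file); it shows that Do replays keys in insertion order while Add appends routes under an existing key, which is what keeps the order of Routes per WebService stable in the listing (see CHANGES 2015-03-17).

    package swagger

    import (
        "fmt"

        "github.com/emicklei/go-restful"
    )

    func exampleOrderedRouteMap() {
        m := newOrderedRouteMap()
        m.Add("/users", restful.Route{Method: "GET", Path: "/users"})
        m.Add("/orders", restful.Route{Method: "GET", Path: "/orders"})
        m.Add("/users", restful.Route{Method: "POST", Path: "/users"}) // appended under the existing key

        m.Do(func(key string, routes []restful.Route) {
            fmt.Println(key, len(routes)) // prints /users 2, then /orders 1
        })
    }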
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go
new file mode 100644
index 0000000..967b671
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger.go
@@ -0,0 +1,184 @@
+// Package swagger implements the structures of the Swagger 1.2 specification ; see
+// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
+package swagger
+
+const swaggerVersion = "1.2"
+
+// 4.3.3 Data Type Fields
+type DataTypeFields struct {
+ Type *string `json:"type,omitempty"` // if Ref not used
+ Ref *string `json:"$ref,omitempty"` // if Type not used
+ Format string `json:"format,omitempty"`
+ DefaultValue Special `json:"defaultValue,omitempty"`
+ Enum []string `json:"enum,omitempty"`
+ Minimum string `json:"minimum,omitempty"`
+ Maximum string `json:"maximum,omitempty"`
+ Items *Item `json:"items,omitempty"`
+ UniqueItems *bool `json:"uniqueItems,omitempty"`
+}
+
+type Special string
+
+// 4.3.4 Items Object
+type Item struct {
+ Type *string `json:"type,omitempty"`
+ Ref *string `json:"$ref,omitempty"`
+ Format string `json:"format,omitempty"`
+}
+
+// 5.1 Resource Listing
+type ResourceListing struct {
+ SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
+ Apis []Resource `json:"apis"`
+ ApiVersion string `json:"apiVersion"`
+ Info Info `json:"info"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+}
+
+// 5.1.2 Resource Object
+type Resource struct {
+ Path string `json:"path"` // relative or absolute, must start with /
+ Description string `json:"description"`
+}
+
+// 5.1.3 Info Object
+type Info struct {
+ Title string `json:"title"`
+ Description string `json:"description"`
+ TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
+ Contact string `json:"contact,omitempty"`
+ License string `json:"license,omitempty"`
+ LicenseUrl string `json:"licenseUrl,omitempty"`
+}
+
+// 5.1.5
+type Authorization struct {
+ Type string `json:"type"`
+ PassAs string `json:"passAs"`
+ Keyname string `json:"keyname"`
+ Scopes []Scope `json:"scopes"`
+ GrantTypes []GrantType `json:"grandTypes"`
+}
+
+// 5.1.6, 5.2.11
+type Scope struct {
+ // Required. The name of the scope.
+ Scope string `json:"scope"`
+ // Recommended. A short description of the scope.
+ Description string `json:"description"`
+}
+
+// 5.1.7
+type GrantType struct {
+ Implicit Implicit `json:"implicit"`
+ AuthorizationCode AuthorizationCode `json:"authorization_code"`
+}
+
+// 5.1.8 Implicit Object
+type Implicit struct {
+ // Required. The login endpoint definition.
+ loginEndpoint LoginEndpoint `json:"loginEndpoint"`
+ // An optional alternative name to standard "access_token" OAuth2 parameter.
+ TokenName string `json:"tokenName"`
+}
+
+// 5.1.9 Authorization Code Object
+type AuthorizationCode struct {
+ TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
+ TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
+}
+
+// 5.1.10 Login Endpoint Object
+type LoginEndpoint struct {
+ // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+}
+
+// 5.1.11 Token Request Endpoint Object
+type TokenRequestEndpoint struct {
+ // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+ // An optional alternative name to standard "client_id" OAuth2 parameter.
+ ClientIdName string `json:"clientIdName"`
+ // An optional alternative name to the standard "client_secret" OAuth2 parameter.
+ ClientSecretName string `json:"clientSecretName"`
+}
+
+// 5.1.12 Token Endpoint Object
+type TokenEndpoint struct {
+ // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
+ Url string `json:"url"`
+ // An optional alternative name to standard "access_token" OAuth2 parameter.
+ TokenName string `json:"tokenName"`
+}
+
+// 5.2 API Declaration
+type ApiDeclaration struct {
+ SwaggerVersion string `json:"swaggerVersion"`
+ ApiVersion string `json:"apiVersion"`
+ BasePath string `json:"basePath"`
+ ResourcePath string `json:"resourcePath"` // must start with /
+ Apis []Api `json:"apis,omitempty"`
+ Models ModelList `json:"models,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+}
+
+// 5.2.2 API Object
+type Api struct {
+ Path string `json:"path"` // relative or absolute, must start with /
+ Description string `json:"description"`
+ Operations []Operation `json:"operations,omitempty"`
+}
+
+// 5.2.3 Operation Object
+type Operation struct {
+ DataTypeFields
+ Method string `json:"method"`
+ Summary string `json:"summary,omitempty"`
+ Notes string `json:"notes,omitempty"`
+ Nickname string `json:"nickname"`
+ Authorizations []Authorization `json:"authorizations,omitempty"`
+ Parameters []Parameter `json:"parameters"`
+ ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
+ Produces []string `json:"produces,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Deprecated string `json:"deprecated,omitempty"`
+}
+
+// 5.2.4 Parameter Object
+type Parameter struct {
+ DataTypeFields
+ ParamType string `json:"paramType"` // path,query,body,header,form
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Required bool `json:"required"`
+ AllowMultiple bool `json:"allowMultiple"`
+}
+
+// 5.2.5 Response Message Object
+type ResponseMessage struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+ ResponseModel string `json:"responseModel,omitempty"`
+}
+
+// 5.2.6, 5.2.7 Models Object
+type Model struct {
+ Id string `json:"id"`
+ Description string `json:"description,omitempty"`
+ Required []string `json:"required,omitempty"`
+ Properties ModelPropertyList `json:"properties"`
+ SubTypes []string `json:"subTypes,omitempty"`
+ Discriminator string `json:"discriminator,omitempty"`
+}
+
+// 5.2.8 Properties Object
+type ModelProperty struct {
+ DataTypeFields
+ Description string `json:"description,omitempty"`
+}
+
+// 5.2.10
+type Authorizations map[string]Authorization
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go
new file mode 100644
index 0000000..05a3c7e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go
@@ -0,0 +1,21 @@
+package swagger
+
+type SwaggerBuilder struct {
+ SwaggerService
+}
+
+func NewSwaggerBuilder(config Config) *SwaggerBuilder {
+ return &SwaggerBuilder{*newSwaggerService(config)}
+}
+
+func (sb SwaggerBuilder) ProduceListing() ResourceListing {
+ return sb.SwaggerService.produceListing()
+}
+
+func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
+ return sb.SwaggerService.produceAllDeclarations()
+}
+
+func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
+ return sb.SwaggerService.produceDeclarations(route)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
new file mode 100644
index 0000000..58dd625
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
@@ -0,0 +1,440 @@
+package swagger
+
+import (
+ "fmt"
+
+ "github.com/emicklei/go-restful"
+ // "github.com/emicklei/hopwatch"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+type SwaggerService struct {
+ config Config
+ apiDeclarationMap *ApiDeclarationList
+}
+
+func newSwaggerService(config Config) *SwaggerService {
+ sws := &SwaggerService{
+ config: config,
+ apiDeclarationMap: new(ApiDeclarationList)}
+
+ // Build all ApiDeclarations
+ for _, each := range config.WebServices {
+ rootPath := each.RootPath()
+ // skip the api service itself
+ if rootPath != config.ApiPath {
+ if rootPath == "" || rootPath == "/" {
+ // use routes
+ for _, route := range each.Routes() {
+ entry := staticPathFromRoute(route)
+ _, exists := sws.apiDeclarationMap.At(entry)
+ if !exists {
+ sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
+ }
+ }
+ } else { // use root path
+ sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
+ }
+ }
+ }
+
+ // if specified then call the PostBuildHandler
+ if config.PostBuildHandler != nil {
+ config.PostBuildHandler(sws.apiDeclarationMap)
+ }
+ return sws
+}
+
+// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
+var LogInfo = func(format string, v ...interface{}) {
+ // use the restful package-wide logger
+ log.Printf(format, v...)
+}
+
+// InstallSwaggerService adds the WebService that provides the API documentation of all services,
+// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
+func InstallSwaggerService(aSwaggerConfig Config) {
+ RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
+}
+
+// RegisterSwaggerService adds the WebService that provides the API documentation of all services,
+// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
+func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
+ sws := newSwaggerService(config)
+ ws := new(restful.WebService)
+ ws.Path(config.ApiPath)
+ ws.Produces(restful.MIME_JSON)
+ if config.DisableCORS {
+ ws.Filter(enableCORS)
+ }
+ ws.Route(ws.GET("/").To(sws.getListing))
+ ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
+ ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
+ LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
+ wsContainer.Add(ws)
+
+ // Check paths for UI serving
+ if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
+ swaggerPathSlash := config.SwaggerPath
+ // path must end with slash /
+ if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
+ LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
+ swaggerPathSlash += "/"
+ }
+
+ LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
+ wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
+
+ // if a custom static handler is defined then use it
+ } else if config.StaticHandler != nil && config.SwaggerPath != "" {
+ swaggerPathSlash := config.SwaggerPath
+ // path must end with slash /
+ if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
+ LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
+ swaggerPathSlash += "/"
+
+ }
+ LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
+ wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
+
+ } else {
+ LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
+ }
+}
+
+func staticPathFromRoute(r restful.Route) string {
+ static := r.Path
+ bracket := strings.Index(static, "{")
+ if bracket <= 1 { // result cannot be empty
+ return static
+ }
+ if bracket != -1 {
+ static = r.Path[:bracket]
+ }
+ if strings.HasSuffix(static, "/") {
+ return static[:len(static)-1]
+ } else {
+ return static
+ }
+}
+
+func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+ if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
+ // prevent duplicate header
+ if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
+ resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
+ }
+ }
+ chain.ProcessFilter(req, resp)
+}
+
+func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
+ listing := sws.produceListing()
+ resp.WriteAsJson(listing)
+}
+
+func (sws SwaggerService) produceListing() ResourceListing {
+ listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
+ sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
+ ref := Resource{Path: k}
+ if len(v.Apis) > 0 { // use description of first (could still be empty)
+ ref.Description = v.Apis[0].Description
+ }
+ listing.Apis = append(listing.Apis, ref)
+ })
+ return listing
+}
+
+func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
+ decl, ok := sws.produceDeclarations(composeRootPath(req))
+ if !ok {
+ resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
+ return
+ }
+ // unless WebServicesUrl is given
+ if len(sws.config.WebServicesUrl) == 0 {
+ // update base path from the actual request
+ // TODO how to detect https? assume http for now
+ var host string
+ // X-Forwarded-Host or Host or Request.Host
+ hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
+ if !ok || len(hostvalues) == 0 {
+ forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
+ if !ok || len(forwarded) == 0 {
+ // fallback to Host field
+ host = req.Request.Host
+ } else {
+ host = forwarded[0]
+ }
+ } else {
+ host = hostvalues[0]
+ }
+ // inspect Referer for the scheme (http vs https)
+ scheme := "http"
+ if referer := req.Request.Header["Referer"]; len(referer) > 0 {
+ if strings.HasPrefix(referer[0], "https") {
+ scheme = "https"
+ }
+ }
+ decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
+ }
+ resp.WriteAsJson(decl)
+}
+
+func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
+ decls := map[string]ApiDeclaration{}
+ sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
+ decls[k] = v
+ })
+ return decls
+}
+
+func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
+ decl, ok := sws.apiDeclarationMap.At(route)
+ if !ok {
+ return nil, false
+ }
+ decl.BasePath = sws.config.WebServicesUrl
+ return &decl, true
+}
+
+// composeDeclaration uses all routes and parameters to create an ApiDeclaration
+func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
+ decl := ApiDeclaration{
+ SwaggerVersion: swaggerVersion,
+ BasePath: sws.config.WebServicesUrl,
+ ResourcePath: pathPrefix,
+ Models: ModelList{},
+ ApiVersion: ws.Version()}
+
+ // collect any path parameters
+ rootParams := []Parameter{}
+ for _, param := range ws.PathParameters() {
+ rootParams = append(rootParams, asSwaggerParameter(param.Data()))
+ }
+ // aggregate by path
+ pathToRoutes := newOrderedRouteMap()
+ for _, other := range ws.Routes() {
+ if strings.HasPrefix(other.Path, pathPrefix) {
+ pathToRoutes.Add(other.Path, other)
+ }
+ }
+ pathToRoutes.Do(func(path string, routes []restful.Route) {
+ api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
+ voidString := "void"
+ for _, route := range routes {
+ operation := Operation{
+ Method: route.Method,
+ Summary: route.Doc,
+ Notes: route.Notes,
+ // Type gets overwritten if there is a write sample
+ DataTypeFields: DataTypeFields{Type: &voidString},
+ Parameters: []Parameter{},
+ Nickname: route.Operation,
+ ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
+
+ operation.Consumes = route.Consumes
+ operation.Produces = route.Produces
+
+ // share root params if any
+ for _, swparam := range rootParams {
+ operation.Parameters = append(operation.Parameters, swparam)
+ }
+ // route specific params
+ for _, param := range route.ParameterDocs {
+ operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
+ }
+
+ sws.addModelsFromRouteTo(&operation, route, &decl)
+ api.Operations = append(api.Operations, operation)
+ }
+ decl.Apis = append(decl.Apis, api)
+ })
+ return decl
+}
+
+func withoutWildcard(path string) string {
+ if strings.HasSuffix(path, ":*}") {
+ return path[0:len(path)-3] + "}"
+ }
+ return path
+}
+
+// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
+func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
+ if route.ResponseErrors == nil {
+ return messages
+ }
+ // sort by code
+ codes := sort.IntSlice{}
+ for code := range route.ResponseErrors {
+ codes = append(codes, code)
+ }
+ codes.Sort()
+ for _, code := range codes {
+ each := route.ResponseErrors[code]
+ message := ResponseMessage{
+ Code: code,
+ Message: each.Message,
+ }
+ if each.Model != nil {
+ st := reflect.TypeOf(each.Model)
+ isCollection, st := detectCollectionType(st)
+ modelName := modelBuilder{}.keyFrom(st)
+ if isCollection {
+ modelName = "array[" + modelName + "]"
+ }
+ modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
+ // reference the model
+ message.ResponseModel = modelName
+ }
+ messages = append(messages, message)
+ }
+ return
+}
+
+// addModelsFromRouteTo takes any read or write sample from the Route and creates Swagger models from it.
+func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
+ if route.ReadSample != nil {
+ sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
+ }
+ if route.WriteSample != nil {
+ sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
+ }
+}
+
+func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
+ isCollection := false
+ if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
+ st = st.Elem()
+ isCollection = true
+ } else {
+ if st.Kind() == reflect.Ptr {
+ if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
+ st = st.Elem().Elem()
+ isCollection = true
+ }
+ }
+ }
+ return isCollection, st
+}
+
+// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
+func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
+ if isResponse {
+ type_, items := asDataType(sample, &sws.config)
+ operation.Type = type_
+ operation.Items = items
+ }
+ modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
+}
+
+func asSwaggerParameter(param restful.ParameterData) Parameter {
+ return Parameter{
+ DataTypeFields: DataTypeFields{
+ Type: &param.DataType,
+ Format: asFormat(param.DataType, param.DataFormat),
+ DefaultValue: Special(param.DefaultValue),
+ },
+ Name: param.Name,
+ Description: param.Description,
+ ParamType: asParamType(param.Kind),
+
+ Required: param.Required}
+}
+
+// Between 1 and 7 path parameters are supported
+func composeRootPath(req *restful.Request) string {
+ path := "/" + req.PathParameter("a")
+ b := req.PathParameter("b")
+ if b == "" {
+ return path
+ }
+ path = path + "/" + b
+ c := req.PathParameter("c")
+ if c == "" {
+ return path
+ }
+ path = path + "/" + c
+ d := req.PathParameter("d")
+ if d == "" {
+ return path
+ }
+ path = path + "/" + d
+ e := req.PathParameter("e")
+ if e == "" {
+ return path
+ }
+ path = path + "/" + e
+ f := req.PathParameter("f")
+ if f == "" {
+ return path
+ }
+ path = path + "/" + f
+ g := req.PathParameter("g")
+ if g == "" {
+ return path
+ }
+ return path + "/" + g
+}
+
+func asFormat(dataType string, dataFormat string) string {
+ if dataFormat != "" {
+ return dataFormat
+ }
+ return "" // TODO
+}
+
+func asParamType(kind int) string {
+ switch {
+ case kind == restful.PathParameterKind:
+ return "path"
+ case kind == restful.QueryParameterKind:
+ return "query"
+ case kind == restful.BodyParameterKind:
+ return "body"
+ case kind == restful.HeaderParameterKind:
+ return "header"
+ case kind == restful.FormParameterKind:
+ return "form"
+ }
+ return ""
+}
+
+func asDataType(any interface{}, config *Config) (*string, *Item) {
+ // If it's not a collection, return the suggested model name
+ st := reflect.TypeOf(any)
+ isCollection, st := detectCollectionType(st)
+ modelName := modelBuilder{}.keyFrom(st)
+ // if it's not a collection we are done
+ if !isCollection {
+ return &modelName, nil
+ }
+
+ // XXX: This is not very elegant
+ // We create an Item object referring to the given model
+ models := ModelList{}
+ mb := modelBuilder{Models: &models, Config: config}
+ mb.addModelFrom(any)
+
+ elemTypeName := mb.getElementTypeName(modelName, "", st)
+ item := new(Item)
+ if mb.isPrimitiveType(elemTypeName) {
+ mapped := mb.jsonSchemaType(elemTypeName)
+ item.Type = &mapped
+ } else {
+ item.Ref = &elemTypeName
+ }
+ tmp := "array"
+ return &tmp, item
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go
new file mode 100644
index 0000000..24fc532
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service.go
@@ -0,0 +1,268 @@
+package restful
+
+import (
+ "fmt"
+ "os"
+ "sync"
+
+ "github.com/emicklei/go-restful/log"
+)
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
+type WebService struct {
+ rootPath string
+ pathExpr *pathExpression // cached compilation of rootPath as RegExp
+ routes []Route
+ produces []string
+ consumes []string
+ pathParameters []*Parameter
+ filters []FilterFunction
+ documentation string
+ apiVersion string
+
+ dynamicRoutes bool
+
+ // protects 'routes' if dynamic routes are enabled
+ routesLock sync.RWMutex
+}
+
+func (w *WebService) SetDynamicRoutes(enable bool) {
+ w.dynamicRoutes = enable
+}
+
+// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
+func (w *WebService) compilePathExpression() {
+ compiled, err := newPathExpression(w.rootPath)
+ if err != nil {
+ log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
+ os.Exit(1)
+ }
+ w.pathExpr = compiled
+}
+
+// ApiVersion sets the API version for documentation purposes.
+func (w *WebService) ApiVersion(apiVersion string) *WebService {
+ w.apiVersion = apiVersion
+ return w
+}
+
+// Version returns the API version for documentation purposes.
+func (w WebService) Version() string { return w.apiVersion }
+
+// Path specifies the root URL template path of the WebService.
+// All Routes will be relative to this path.
+func (w *WebService) Path(root string) *WebService {
+ w.rootPath = root
+ if len(w.rootPath) == 0 {
+ w.rootPath = "/"
+ }
+ w.compilePathExpression()
+ return w
+}
+
+// Param adds a PathParameter to document parameters used in the root path.
+func (w *WebService) Param(parameter *Parameter) *WebService {
+ if w.pathParameters == nil {
+ w.pathParameters = []*Parameter{}
+ }
+ w.pathParameters = append(w.pathParameters, parameter)
+ return w
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) PathParameter(name, description string) *Parameter {
+ return PathParameter(name, description)
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func PathParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
+ p.bePath()
+ return p
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) QueryParameter(name, description string) *Parameter {
+ return QueryParameter(name, description)
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func QueryParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beQuery()
+ return p
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func (w *WebService) BodyParameter(name, description string) *Parameter {
+ return BodyParameter(name, description)
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func BodyParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
+ p.beBody()
+ return p
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) HeaderParameter(name, description string) *Parameter {
+ return HeaderParameter(name, description)
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func HeaderParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beHeader()
+ return p
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) FormParameter(name, description string) *Parameter {
+ return FormParameter(name, description)
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func FormParameter(name, description string) *Parameter {
+ p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+ p.beForm()
+ return p
+}
+
+// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
+func (w *WebService) Route(builder *RouteBuilder) *WebService {
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ builder.copyDefaults(w.produces, w.consumes)
+ w.routes = append(w.routes, builder.Build())
+ return w
+}
+
+// RemoveRoute removes the route that matches the given 'path' and 'method'.
+func (w *WebService) RemoveRoute(path, method string) error {
+ if !w.dynamicRoutes {
+ return fmt.Errorf("dynamic routes are not enabled")
+ }
+ w.routesLock.Lock()
+ defer w.routesLock.Unlock()
+ // rebuild the route list without the matching route; appending (instead of
+ // indexing into a fixed-size slice) also avoids a panic when no route matches
+ newRoutes := make([]Route, 0, len(w.routes))
+ for ix := range w.routes {
+ if w.routes[ix].Method == method && w.routes[ix].Path == path {
+ continue
+ }
+ newRoutes = append(newRoutes, w.routes[ix])
+ }
+ w.routes = newRoutes
+ return nil
+}
+
+// Method creates a new RouteBuilder and initialize its http method
+func (w *WebService) Method(httpMethod string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
+}
+
+// Produces specifies that this WebService can produce one or more MIME types.
+// Http requests must have one of these values set for the Accept header.
+func (w *WebService) Produces(contentTypes ...string) *WebService {
+ w.produces = contentTypes
+ return w
+}
+
+// Consumes specifies that this WebService can consume one or more MIME types.
+// Http requests must have one of these values set for the Content-Type header.
+func (w *WebService) Consumes(accepts ...string) *WebService {
+ w.consumes = accepts
+ return w
+}
+
+// Routes returns the Routes associated with this WebService
+func (w WebService) Routes() []Route {
+ if !w.dynamicRoutes {
+ return w.routes
+ }
+ // Make a copy of the array to prevent concurrency problems
+ w.routesLock.RLock()
+ defer w.routesLock.RUnlock()
+ result := make([]Route, len(w.routes))
+ for ix := range w.routes {
+ result[ix] = w.routes[ix]
+ }
+ return result
+}
+
+// RootPath returns the RootPath associated with this WebService. Default "/"
+func (w WebService) RootPath() string {
+ return w.rootPath
+}
+
+// PathParameters returns the path parameters shared among the Routes of this WebService
+func (w WebService) PathParameters() []*Parameter {
+ return w.pathParameters
+}
+
+// Filter adds a filter function to the chain of filters applicable to all its Routes
+func (w *WebService) Filter(filter FilterFunction) *WebService {
+ w.filters = append(w.filters, filter)
+ return w
+}
+
+// Doc is used to set the documentation of this service.
+func (w *WebService) Doc(plainText string) *WebService {
+ w.documentation = plainText
+ return w
+}
+
+// Documentation returns the documentation of this service.
+func (w WebService) Documentation() string {
+ return w.documentation
+}
+
+/*
+ Convenience methods
+*/
+
+// HEAD is a shortcut for .Method("HEAD").Path(subPath)
+func (w *WebService) HEAD(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+}
+
+// GET is a shortcut for .Method("GET").Path(subPath)
+func (w *WebService) GET(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
+}
+
+// POST is a shortcut for .Method("POST").Path(subPath)
+func (w *WebService) POST(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
+}
+
+// PUT is a shortcut for .Method("PUT").Path(subPath)
+func (w *WebService) PUT(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
+}
+
+// PATCH is a shortcut for .Method("PATCH").Path(subPath)
+func (w *WebService) PATCH(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+}
+
+// DELETE is a shortcut for .Method("DELETE").Path(subPath)
+func (w *WebService) DELETE(subPath string) *RouteBuilder {
+ return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+}
diff --git a/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go
new file mode 100644
index 0000000..c9d31b0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/emicklei/go-restful/web_service_container.go
@@ -0,0 +1,39 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+ "net/http"
+)
+
+// DefaultContainer is a restful.Container that uses http.DefaultServeMux
+var DefaultContainer *Container
+
+func init() {
+ DefaultContainer = NewContainer()
+ DefaultContainer.ServeMux = http.DefaultServeMux
+}
+
+// DoNotRecover controls whether panics are recovered and turned into HTTP 500 responses.
+// If set to true, panics will not be caught; in that case, Route functions are
+// responsible for handling any error situation.
+// The default value is false (recover from panics), which has performance implications.
+// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
+var DoNotRecover = false
+
+// Add registers a new WebService and adds it to the DefaultContainer.
+func Add(service *WebService) {
+ DefaultContainer.Add(service)
+}
+
+// Filter appends a container FilterFunction to the DefaultContainer.
+// These are called before dispatching a http.Request to a WebService.
+func Filter(filter FilterFunction) {
+ DefaultContainer.Filter(filter)
+}
+
+// RegisteredWebServices returns the collection of WebServices from the DefaultContainer
+func RegisteredWebServices() []*WebService {
+ return DefaultContainer.RegisteredWebServices()
+}
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE b/src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000..7805d36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/README.md b/src/kube2msb/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000..f8f7e36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,116 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
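+
+A minimal sketch of the decode-it-yourself approach (the `Blob` type and its field are only illustrative, not part of this library): a custom `UnmarshalJSON` turns the plain base64 string back into raw bytes, and a matching `MarshalJSON` would do the reverse with `base64.StdEncoding.EncodeToString`.
+
+```go
+import (
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Blob is a hypothetical example type whose binary data is stored as plain base64 in YAML/JSON.
+type Blob struct {
+ Data []byte
+}
+
+// UnmarshalJSON decodes the base64 string stored in the document into raw bytes.
+func (b *Blob) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+  return err
+ }
+ raw, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+  return err
+ }
+ b.Data = raw
+ return nil
+}
+```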
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps whose keys are themselves maps will result in an error, since such keys cannot be represented in JSON. The same error occurs in `Unmarshal`, and such keys could not be unmarshaled into a struct anyway, because struct field names cannot be maps.
+
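+For example (a minimal sketch, using the import shown in the code itself), a YAML mapping whose key is itself a mapping cannot be converted:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+func main() {
+ // An explicit complex key ("? {a: 1}") makes the map key itself a map.
+ y := []byte("? {a: 1}\n: value\n")
+ if _, err := yaml.YAMLToJSON(y); err != nil {
+  fmt.Printf("err: %v\n", err) // conversion fails; such a key cannot be represented in JSON
+ }
+}
+```
+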
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+The `yaml.YAMLToJSON` and `yaml.JSONToYAML` functions are also available:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/fields.go b/src/kube2msb/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000..0bd3c2b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,497 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go b/src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000..c02beac
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshaling to interface{}; it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+ // incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+ // We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE b/src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..335e38e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,36 @@
+Extensions for Protocol Buffers to create more go like structures.
+
+Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+http://github.com/gogo/protobuf/gogoproto
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000..23a6b17
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto
+ make
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..79edb86
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,228 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
+ emOut := out.Addr().Interface().(extensionsMap)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+ emOut := out.Addr().Interface().(extensionsBytes)
+ bIn := emIn.GetExtensions()
+ bOut := emOut.GetExtensions()
+ *bOut = append(*bOut, *bIn...)
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
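+
+// Taken together, the cases above give Merge its observable behavior:
+// repeated fields are appended, map entries are copied key by key
+// (overwriting duplicate keys), nested messages are merged recursively,
+// and a scalar is copied unless it is a proto3 zero value, in which case
+// the destination keeps whatever it already holds.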
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000..cb5b213
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or zero for
+// both if the slice is too short or the value overflows 64 bits.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
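+
+// As a worked example, DecodeVarint([]byte{0xAC, 0x02}) yields x=300, n=2:
+// 0xAC contributes its low seven bits (0x2C = 44) and sets the continuation
+// bit, and 0x02 contributes 2<<7 = 256, for a total of 300.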
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
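+
+// Both fixed-width decoders read little-endian: the least-significant byte
+// comes first on the wire, so the bytes {0x01, 0x00, 0x00, 0x00} decode to
+// the fixed32 value 1.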
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
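+
+// Zigzag decoding undoes the interleaving used for sint32/sint64: the wire
+// values 0, 1, 2, 3, 4 map back to 0, -1, 1, -2, 2, so small negative
+// numbers stay small on the wire.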
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+		// TODO: check whether more call sites can pass alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
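+
+// Note that with alloc=false the returned slice aliases the Buffer's
+// backing array and is only valid until that buffer is reused; alloc=true
+// returns an independent copy.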
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
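+
+// A typical call site looks like the sketch below, where pb.MyMessage
+// stands in for any generated message type:
+//
+//	msg := new(pb.MyMessage)
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		// data did not hold a valid encoding of pb.MyMessage
+//	}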
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ if ee, eok := e.(extensionsMap); eok {
+ ext := ee.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ ee.ExtensionMap()[int32(tag)] = ext
+ } else if ee, eok := e.(extensionsBytes); eok {
+ ext := ee.GetExtensions()
+ *ext = append(*ext, o.buf[oi:o.index]...)
+ }
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go
new file mode 100644
index 0000000..6a77aad
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/decode_gogo.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+// Decode a reference to a struct pointer.
+func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is a pointer receiver")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ bas := structPointer_FieldPointer(base, p.field)
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers ([]struct).
+func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error {
+ newBas := appendStructPointer(base, p.field, p.sstype)
+
+ if is_group {
+		panic("not supported, maybe in the future, if requested.")
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ panic("not supported, since this is not a pointer receiver.")
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, newBas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of references to struct pointers.
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_ref_struct(p, false, base)
+}
+
+func setPtrCustomType(base structPointer, f field, v interface{}) {
+ if v == nil {
+ return
+ }
+ structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer()))
+}
+
+func setCustomType(base structPointer, f field, value interface{}) {
+ if value == nil {
+ return
+ }
+ v := reflect.ValueOf(value).Elem()
+ t := reflect.TypeOf(value).Elem()
+ kind := t.Kind()
+ switch kind {
+ case reflect.Slice:
+ slice := reflect.MakeSlice(t, v.Len(), v.Cap())
+ reflect.Copy(slice, v)
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ oldHeader.Data = slice.Pointer()
+ oldHeader.Len = v.Len()
+ oldHeader.Cap = v.Cap()
+ default:
+ l := 1
+ size := reflect.TypeOf(value).Elem().Size()
+ if kind == reflect.Array {
+ l = reflect.TypeOf(value).Elem().Len()
+ size = reflect.TypeOf(value).Size()
+ }
+ total := int(size) * l
+ structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total)
+ }
+}
+
+func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ setPtrCustomType(base, p.field, custom)
+ return nil
+}
+
+func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ if custom != nil {
+ setCustomType(base, p.field, custom)
+ }
+ return nil
+}
+
+// Decode a slice of bytes ([]byte) into a slice of custom types.
+func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ i := reflect.New(p.ctype.Elem()).Interface()
+ custom := (i).(Unmarshaler)
+ if err := custom.Unmarshal(b); err != nil {
+ return err
+ }
+ newBas := appendStructPointer(base, p.field, p.ctype)
+
+ setCustomType(newBas, 0, custom)
+
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000..7321e1a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,1335 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
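+
+// As a worked example, EncodeVarint(300) returns {0xAC, 0x02}: the low
+// seven bits (44) are written first with the continuation bit set,
+// followed by the remaining bits (2).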
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
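+
+// The zigzag transform interleaves signs so that sint values near zero
+// stay short: the sint64 value -3 is written as the varint 5, and 3 is
+// written as 6.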
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
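+
+// A length-delimited field is simply its varint byte count followed by the
+// raw bytes, so EncodeStringBytes("hi") appends {0x02, 'h', 'i'} to the
+// buffer.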
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
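+
+// A typical call site looks like the sketch below, where msg stands in for
+// any populated generated message:
+//
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		// e.g. a required field was left unset
+//	}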
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// isNil reports whether a field value is nil. Only interface, map, pointer,
+// and slice kinds can be nil; every other kind reports false.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
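+
+// A packed repeated field is written as a single length-delimited record:
+// one tag code, a varint byte count, then the concatenated element
+// encodings with no per-element tags. Three packed bools therefore cost
+// the tag bytes plus four payload bytes (a one-byte length of 3 and one
+// byte per bool).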
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
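+ // Illustrative example (editor's note, not part of the upstream source):
+ // for a field `map<string, int32> m = 1;` containing {"a": 3}, the single
+ // entry is written as m's tag, the byte length of the entry, and then the
+ // entry body: the key "a" encoded as field 1 and the value 3 as field 2.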
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns addressable reflect.Values for the map's key and value
+// types, along with structPointers suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
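+ // For instance (editor's illustration, not part of the upstream source):
+ // for a map[string]int32 field, keybase ends up wrapping a **string and
+ // valbase a **int32, so the element encoders see the same shape as an
+ // optional struct field; for map[string]*Msg the value is already a
+ // pointer, so valbase wraps a **Msg directly.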
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ // TODO: This could be faster and use less reflection.
+ if prop.oneofMarshaler != nil {
+ sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem()
+ for i := 0; i < prop.stype.NumField(); i++ {
+ fv := sv.Field(i)
+ if fv.Kind() != reflect.Interface || fv.IsNil() {
+ continue
+ }
+ if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" {
+ continue
+ }
+ spv := fv.Elem() // interface -> *T
+ sv := spv.Elem() // *T -> T
+ sf := sv.Type().Field(0) // StructField inside T
+ var prop Properties
+ prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf)
+ n += prop.size(&prop, toStructPointer(spv))
+ }
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
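+
+// Worked example (editor's illustration, not part of the upstream source):
+// enc_len_thing reserves four bytes for the length before the payload size is
+// known. If the payload turns out to be 3 bytes, its length varint needs only
+// one byte, so x = 1-4 = -3 and the payload is shifted three bytes left before
+// the varint 0x03 is written into the reclaimed space; a 300-byte payload
+// needs a two-byte varint, giving x = -2 and a two-byte shift.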
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000..f77cfb1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,354 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// http://github.com/golang/protobuf/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+ return &RequiredNotSetError{field}
+}
+
+type Sizer interface {
+ Size() int
+}
+
+func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, s...)
+ return nil
+}
+
+func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(s)
+ return
+}
+
+// Encode a reference to bool pointer.
+func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ x := 0
+ if v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_bool(p *Properties, base structPointer) int {
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode a reference to int32 pointer.
+func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v))
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_ref_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a reference to an int64 pointer.
+func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_ref_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a reference to a string pointer.
+func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_ref_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// Encode a reference to a message struct.
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+// TODO: this is copied code; refactor to remove the duplication.
+func size_ref_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetRefStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a slice of references to message struct pointers ([]struct).
+func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ }
+ return state.err
+}
+
+//TODO this is only copied, please fix this
+func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) {
+ ss := structPointer_GetStructPointer(base, p.field)
+ ss1 := structPointer_GetRefStructPointer(ss, field(0))
+ size := p.stype.Size()
+ l := structPointer_Len(base, p.field)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := structPointer_Add(ss1, field(uintptr(i)*size))
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error {
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return ErrNil
+ }
+ custom := i.(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error {
+ custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+}
+
+func size_custom_ref_bytes(p *Properties, base structPointer) (n int) {
+ n += len(p.tagcode)
+ i := structPointer_InterfaceAt(base, p.field, p.ctype)
+ if i == nil {
+ return 0
+ }
+ custom := i.(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ return
+}
+
+func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return ErrNil
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ }
+ return nil
+}
+
+func size_custom_slice_bytes(p *Properties, base structPointer) (n int) {
+ inter := structPointer_InterfaceRef(base, p.field, p.ctype)
+ if inter == nil {
+ return 0
+ }
+ slice := reflect.ValueOf(inter)
+ l := slice.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ v := slice.Index(i)
+ custom := v.Interface().(Marshaler)
+ data, _ := custom.Marshal()
+ n += sizeRawBytes(data)
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000..cc3f2c9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN.
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
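+
+// Usage sketch (editor's illustration, not part of the upstream source),
+// reusing the generated pb.Test type from the package documentation:
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	proto.Equal(a, b)           // true: same type, corresponding fields equal
+//	proto.Equal(a, &pb.Test{})  // false: Label is set in a but unset in b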
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+func equalAny(v1, v2 reflect.Value) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem())
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000..9a6374f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,519 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+}
+
+type extensionsMap interface {
+ extendableProto
+ ExtensionMap() map[int32]Extension
+}
+
+type extensionsBytes interface {
+ extendableProto
+ GetExtensions() *[]byte
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions), desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ if ebase, ok := base.(extensionsMap); ok {
+ ebase.ExtensionMap()[id] = Extension{enc: b}
+ } else if ebase, ok := base.(extensionsBytes); ok {
+ clearExtension(base, id)
+ ext := ebase.GetExtensions()
+ *ext = append(*ext, b...)
+ } else {
+ panic("unreachable")
+ }
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ err := encodeExtension(&e)
+ if err != nil {
+ return err
+ }
+ m[k] = e
+ }
+ return nil
+}
+
+func encodeExtension(e *Extension) error {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ return nil
+ }
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ if epb, doki := pb.(extensionsMap); doki {
+ _, ok := epb.ExtensionMap()[extension.Field]
+ return ok
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ buf := *ext
+ o := 0
+ for o < len(buf) {
+ tag, n := DecodeVarint(buf[o:])
+ fieldNum := int32(tag >> 3)
+ if int32(fieldNum) == extension.Field {
+ return true
+ }
+ wireType := int(tag & 0x7)
+ o += n
+ l, err := size(buf[o:], wireType)
+ if err != nil {
+ return false
+ }
+ o += l
+ }
+ return false
+ }
+ panic("unreachable")
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+ ext := pb.GetExtensions()
+ for offset < len(*ext) {
+ tag, n1 := DecodeVarint((*ext)[offset:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ n2, err := size((*ext)[offset+n1:], wireType)
+ if err != nil {
+ panic(err)
+ }
+ newOffset := offset + n1 + n2
+ if fieldNum == theFieldNum {
+ *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+ return offset
+ }
+ offset = newOffset
+ }
+ return -1
+}
+
+func clearExtension(pb extendableProto, fieldNum int32) {
+ if epb, doki := pb.(extensionsMap); doki {
+ delete(epb.ExtensionMap(), fieldNum)
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ offset := 0
+ for offset != -1 {
+ offset = deleteExtension(epb, fieldNum, offset)
+ }
+ } else {
+ panic("unreachable")
+ }
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ clearExtension(pb, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ if epb, doki := pb.(extensionsMap); doki {
+ emap := epb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ o := 0
+ for o < len(*ext) {
+ tag, n := DecodeVarint((*ext)[o:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size((*ext)[o+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ if int32(fieldNum) == extension.Field {
+ v, err := decodeExtension((*ext)[o:o+n+l], extension)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ }
+ o += n + l
+ }
+ return defaultExtensionValue(extension)
+ }
+ panic("unreachable")
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr; we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ rep := extension.repeated()
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if !rep || o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+ return setExtension(pb, extension, value)
+}
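+
+// Usage sketch (editor's illustration, not part of the upstream source),
+// assuming a hypothetical generated extension descriptor pb.E_Nickname whose
+// ExtensionType is *string, and an extendable message msg:
+//
+//	if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("fred")); err != nil {
+//		// handle err
+//	}
+//	v, err := proto.GetExtension(msg, pb.E_Nickname) // v is a *string on success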
+
+func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if epb, doki := pb.(extensionsMap); doki {
+ epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ } else if epb, doki := pb.(extensionsBytes); doki {
+ ClearExtension(pb, extension)
+ ext := epb.GetExtensions()
+ et := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+ p := NewBuffer(nil)
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ *ext = append(*ext, p.buf...)
+ }
+ return nil
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
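+
+// For example (editor's illustration, not part of the upstream source):
+// RegisteredExtensions((*pb.MyMessage)(nil)) returns the descriptors that the
+// generated code registered for the hypothetical extendable type pb.MyMessage,
+// keyed by extension field number.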
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000..bd55fb6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
+ if reflect.ValueOf(pb).IsNil() {
+ return ifnotset
+ }
+ value, err := GetExtension(pb, extension)
+ if err != nil {
+ return ifnotset
+ }
+ if value == nil {
+ return ifnotset
+ }
+ if value.(*bool) == nil {
+ return ifnotset
+ }
+ return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+ return bytes.Equal(this.enc, that.enc)
+}
+
+func SizeOfExtensionMap(m map[int32]Extension) (n int) {
+ return sizeExtensionMap(m)
+}
+
+type sortableMapElem struct {
+ field int32
+ ext Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+ s := make(sortableExtensions, 0, len(m))
+ for k, v := range m {
+ s = append(s, &sortableMapElem{field: k, ext: v})
+ }
+ return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+ sort.Sort(this)
+ ss := make([]string, len(this))
+ for i := range this {
+ ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+ }
+ return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+ return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+ m, err := BytesToExtensionsMap(ext)
+ if err != nil {
+ panic(err)
+ }
+ return StringFromExtensionsMap(m)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return 0, err
+ }
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ n += copy(data[n:], m[int32(k)].enc)
+ }
+ return n, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+ if m[id].value == nil || m[id].desc == nil {
+ return m[id].enc, nil
+ }
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+ return m[id].enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+ switch wire {
+ case WireVarint:
+ _, n := DecodeVarint(buf)
+ return n, nil
+ case WireFixed64:
+ return 8, nil
+ case WireBytes:
+ v, n := DecodeVarint(buf)
+ return int(v) + n, nil
+ case WireFixed32:
+ return 4, nil
+ case WireStartGroup:
+ offset := 0
+ for {
+ u, n := DecodeVarint(buf[offset:])
+ fwire := int(u & 0x7)
+ offset += n
+ if fwire == WireEndGroup {
+ return offset, nil
+ }
+ // Skip the nested field using its own wire type (fwire); passing the
+ // enclosing group's wire type here would misparse non-group fields.
+ s, err := size(buf[offset:], fwire)
+ if err != nil {
+ return 0, err
+ }
+ offset += s
+ }
+ }
+ return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
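+
+// For example (editor's illustration, not part of the upstream source): for
+// WireBytes the payload "abc" is preceded by the varint length 3, so size
+// returns 1+3 = 4; a WireFixed32 field always contributes exactly 4 bytes.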
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+ m := make(map[int32]Extension)
+ i := 0
+ for i < len(buf) {
+ tag, n := DecodeVarint(buf[i:])
+ if n <= 0 {
+ return nil, fmt.Errorf("unable to decode varint")
+ }
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size(buf[i+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ end := i + int(l) + n
+ m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+ i = end
+ }
+ return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+ ee := Extension{enc: make([]byte, len(e))}
+ copy(ee.enc, e)
+ return ee
+}
+
+func (this Extension) GoString() string {
+ if this.enc == nil {
+ if err := encodeExtension(&this); err != nil {
+ panic(err)
+ }
+ }
+ return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return setExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+ }
+ return GetExtension(pb, desc)
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000..8ffa91a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,883 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/gogo/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/gogo/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
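+
+// Every generated message type satisfies this interface, so any of them can be
+// passed to the package-level helpers below. A one-line compile-time check, as
+// a sketch (pb.Test is the generated type from the package example above):
+//
+//	var _ proto.Message = (*pb.Test)(nil)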
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point (for decoding); encoding appends directly to buf
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
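+
+// A minimal sketch of reusing one Buffer across many messages to cut down on
+// allocations (msgs and send are illustrative placeholders; Buffer.Marshal is
+// defined in encode.go):
+//
+//	buf := proto.NewBuffer(nil)
+//	for _, msg := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(msg); err != nil {
+//			log.Fatal(err)
+//		}
+//		send(buf.Bytes())
+//	}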
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
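+
+// These helpers exist because optional scalar fields of generated structs are
+// pointers. A short sketch, assuming the generated pb.Test from the package
+// example:
+//
+//	t := &pb.Test{Label: proto.String("hello"), Type: proto.Int32(3)}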
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
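+
+// A sketch of the two accepted JSON forms, reusing the FOO enum maps from the
+// generated example in the package documentation:
+//
+//	v, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic -> 17
+//	n, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric  -> 17
+//	fmt.Println(proto.EnumName(FOO_name, v), n)                      // prints: X 17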
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
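+
+// Usage sketch: dump the wire encoding of a message for inspection (msg is an
+// illustrative generated message value):
+//
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	proto.NewBuffer(nil).DebugPrint("encoded msg", data)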
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
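+
+// Typical use, as a sketch: fill in declared defaults after decoding. pb.Test
+// is the generated type from the package example, whose type field declares a
+// default of 77; data is assumed to hold a previously marshaled Test.
+//
+//	msg := &pb.Test{}
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	proto.SetDefaults(msg)     // msg.Type now points at 77 if it was absent
+//	fmt.Println(msg.GetType()) // 77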
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its default-value
+ // information: the scalar fields that have proto-declared defaults and the
+ // indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
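+
+// Internal usage sketch: callers collect a map field's keys via reflection and
+// sort them through this helper to get a deterministic order (mv stands for a
+// reflect.Value of kind Map):
+//
+//	keys := mv.MapKeys()
+//	sort.Sort(mapKeys(keys))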
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..a6c2c06
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+ s, ok := m[value]
+ if !ok {
+ s = strconv.Itoa(int(value))
+ }
+ return json.Marshal(s)
+}
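+
+// Sketch, reusing the FOO enum maps from the proto package documentation: a
+// known value marshals to its symbolic name, an unknown value falls back to
+// its decimal string.
+//
+//	b, _ := proto.MarshalJSONEnum(FOO_name, 17) // b == []byte(`"X"`)
+//	b, _ = proto.MarshalJSONEnum(FOO_name, 99)  // b == []byte(`"99"`)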
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..e25e01e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
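+
+// A sketch of how generated code is expected to call this for an extension
+// message stored in a MessageSet (MyExtension and field number 12345 are
+// illustrative placeholders):
+//
+//	func init() {
+//		proto.RegisterMessageSetType((*MyExtension)(nil), 12345, "example.MyExtension")
+//	}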
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..749919d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..e9be0fe
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000..6bc85fa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ return r.Interface()
+}
+
+func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} {
+ point := unsafe.Pointer(uintptr(p) + uintptr(f))
+ r := reflect.NewAt(t, point)
+ if r.Elem().IsNil() {
+ return nil
+ }
+ return r.Elem().Interface()
+}
+
+func copyUintPtr(oldptr, newptr uintptr, size int) {
+ oldbytes := make([]byte, 0)
+ oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
+ oldslice.Data = oldptr
+ oldslice.Len = size
+ oldslice.Cap = size
+ newbytes := make([]byte, 0)
+ newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
+ newslice.Data = newptr
+ newslice.Len = size
+ newslice.Cap = size
+ copy(newbytes, oldbytes)
+}
+
+func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
+ copyUintPtr(uintptr(oldptr), uintptr(newptr), size)
+}
+
+func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
+ size := typ.Elem().Size()
+ oldHeader := structPointer_GetSliceHeader(base, f)
+ newLen := oldHeader.Len + 1
+ slice := reflect.MakeSlice(typ, newLen, newLen)
+ bas := toStructPointer(slice)
+ for i := 0; i < oldHeader.Len; i++ {
+ newElemptr := uintptr(bas) + uintptr(i)*size
+ oldElemptr := oldHeader.Data + uintptr(i)*size
+ copyUintPtr(oldElemptr, newElemptr, int(size))
+ }
+
+ oldHeader.Data = uintptr(bas)
+ oldHeader.Len = newLen
+ oldHeader.Cap = newLen
+
+ return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size)))
+}
+
+func structPointer_FieldPointer(p structPointer, f field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_GetRefStructPointer(p structPointer, f field) structPointer {
+ return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader {
+ return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_Add(p structPointer, size field) structPointer {
+ return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size)))
+}
+
+func structPointer_Len(p structPointer, f field) int {
+ return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f))))
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000..4711057
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,915 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ CustomType string
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sstype reflect.Type // set for slices of structs types only
+ ctype reflect.Type // set for custom types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ case strings.HasPrefix(f, "embedded="):
+ p.OrigName = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "customtype="):
+ p.CustomType = strings.Split(f, "=")[1]
+ }
+ }
+}
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+ if len(p.CustomType) > 0 {
+ p.setCustomEncAndDec(typ)
+ p.setTag(lockGetProp)
+ return
+ }
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ } else {
+ p.enc = (*Buffer).enc_ref_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_ref_bool
+ }
+ case reflect.Int32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ } else {
+ p.enc = (*Buffer).enc_ref_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_int32
+ }
+ case reflect.Uint32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_ref_uint32
+ }
+ case reflect.Int64, reflect.Uint64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.Float32:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ } else {
+ p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_ref_uint32
+ }
+ case reflect.Float64:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ } else {
+ p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_ref_int64
+ }
+ case reflect.String:
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+ } else {
+ p.enc = (*Buffer).enc_ref_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_ref_string
+ }
+ case reflect.Struct:
+ p.stype = typ
+ p.isMarshaler = isMarshaler(typ)
+ p.isUnmarshaler = isUnmarshaler(typ)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_ref_struct_message
+ p.dec = (*Buffer).dec_ref_struct_message
+ p.size = size_ref_struct_message
+ } else {
+ fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ)
+ }
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ case reflect.Struct:
+ p.setSliceOfNonPointerStructs(t1)
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+ p.setTag(lockGetProp)
+}
+
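+// setTag precomputes the field's varint-encoded key (tag number and wire type)
+// into p.tagcode and, for message-typed fields, resolves the nested StructProperties.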
+func (p *Properties) setTag(lockGetProp bool) {
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ isOneofMessage := false
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ if len(f.Tag.Get("protobuf")) > 0 {
+ p.enc = (*Buffer).enc_ext_slice_byte
+ p.dec = nil // not needed
+ p.size = size_ext_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ if oneof {
+ isOneofMessage = true
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+ if _, ok := enumStringMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or a nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000..8daf9f7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+)
+
+func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
+ p.ctype = typ
+ if p.Repeated {
+ p.enc = (*Buffer).enc_custom_slice_bytes
+ p.dec = (*Buffer).dec_custom_slice_bytes
+ p.size = size_custom_slice_bytes
+ } else if typ.Kind() == reflect.Ptr {
+ p.enc = (*Buffer).enc_custom_bytes
+ p.dec = (*Buffer).dec_custom_bytes
+ p.size = size_custom_bytes
+ } else {
+ p.enc = (*Buffer).enc_custom_ref_bytes
+ p.dec = (*Buffer).dec_custom_ref_bytes
+ p.size = size_custom_ref_bytes
+ }
+}
+
+func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
+ t2 := typ.Elem()
+ p.sstype = typ
+ p.stype = t2
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ p.enc = (*Buffer).enc_slice_ref_struct_message
+ p.dec = (*Buffer).dec_slice_ref_struct_message
+ p.size = size_slice_ref_struct_message
+ if p.Wire != "bytes" {
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2)
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000..4fe7e08
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "io"
+)
+
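+// Skip scans the first complete field (key and value) in data and returns the
+// number of bytes it occupies, without decoding the value. Groups are skipped
+// through their matching end-group marker.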
+func Skip(data []byte) (n int, err error) {
+ l := len(data)
+ index := 0
+ for index < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ index++
+ if data[index-1] < 0x80 {
+ break
+ }
+ }
+ return index, nil
+ case 1:
+ index += 8
+ return index, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ index += length
+ return index, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = index
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := Skip(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ index = start + next
+ }
+ return index, nil
+ case 4:
+ return index, nil
+ case 5:
+ index += 4
+ return index, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000..7c9ae90
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,793 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
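+// writeStruct writes the fields of the struct value sv in text format: repeated
+// fields one entry per line, map entries as key/value sub-messages, and any
+// extensions last.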
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, v, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props.Parse(tag) // Overwrite the outer props.
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ if len(props.Enum) > 0 {
+ if err := writeEnum(w, fv, props); err != nil {
+ return err
+ }
+ } else if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv
+ if pv.CanAddr() {
+ pv = sv.Addr()
+ } else {
+ pv = reflect.New(sv.Type())
+ pv.Elem().Set(sv)
+ }
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ if props != nil && len(props.CustomType) > 0 {
+ custom, ok := v.Interface().(Marshaler)
+ if ok {
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if err := writeString(w, string(data)); err != nil {
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
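+// writeUnknownStruct decodes the raw bytes of an XXX_unrecognized field and
+// writes each unknown field as a tag/value line, bracing nested groups.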
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+ return ferr
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, werr := w.Write(endBraceNewline); werr != nil {
+ return werr
+ }
+ continue
+ }
+ if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+ return ferr
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ var m map[int32]Extension
+ if em, ok := ep.(extensionsMap); ok {
+ m = em.ExtensionMap()
+ } else if em, ok := ep.(extensionsBytes); ok {
+ eb := em.GetExtensions()
+ var err error
+ m, err = BytesToExtensionsMap(*eb)
+ if err != nil {
+ return err
+ }
+ }
+
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
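+// marshalText is the implementation shared by MarshalText and CompactText. It
+// buffers w if needed and defers to encoding.TextMarshaler when pb implements it.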
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000..cdb2337
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+ m, ok := enumStringMaps[props.Enum]
+ if !ok {
+ // No name map registered for this enum; fall back to the numeric value.
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ return nil
+ }
+ key := int32(0)
+ if v.Kind() == reflect.Ptr {
+ key = int32(v.Elem().Int())
+ } else {
+ key = int32(v.Int())
+ }
+ s, ok := m[key]
+ if !ok {
+ // Unknown enum value; fall back to the numeric value.
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ return nil
+ }
+ _, err := fmt.Fprint(w, s)
+ return err
+}
diff --git a/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..f390969
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,841 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || p.s[0] != '"' {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
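
For context, a minimal sketch of how this text parser is typically driven from application code. The generated message package `pb` and its `Person` type are hypothetical placeholders, and the proto import path should be adjusted to the package path actually vendored here:

    package main

    import (
        "log"

        "github.com/golang/protobuf/proto" // assumption: adjust to the vendored proto package path
        pb "example.com/myapp/personpb"    // hypothetical generated package
    )

    func main() {
        msg := &pb.Person{} // hypothetical message with name and id fields
        if err := proto.UnmarshalText(`name: "alice" id: 42`, msg); err != nil {
            // A *RequiredNotSetError here means the text parsed,
            // but a required field was never assigned.
            log.Fatalf("parse failed: %v", err)
        }
        log.Printf("parsed: %+v", msg)
    }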
diff --git a/src/kube2msb/vendor/github.com/golang/glog/LICENSE b/src/kube2msb/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/golang/glog/README b/src/kube2msb/vendor/github.com/golang/glog/README
new file mode 100644
index 0000000..5f9c114
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ http://code.google.com/p/google-glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation for the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
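
As a quick complement to the README above, a minimal sketch of driving glog from a program; the flag values mentioned in the comments are illustrative only:

    package main

    import (
        "flag"

        "github.com/golang/glog"
    )

    func main() {
        // glog registers -logtostderr, -v, -vmodule, etc. on the standard
        // flag set, and flag.Parse must run before any logging is done.
        flag.Parse()
        defer glog.Flush() // flush buffered log lines before exiting

        glog.Info("service starting")
        if glog.V(2) {
            glog.Info("extra detail, enabled with -v=2 or a matching -vmodule pattern")
        }
        glog.Warningf("cache miss rate: %.2f", 0.25)
    }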
diff --git a/src/kube2msb/vendor/github.com/golang/glog/glog.go b/src/kube2msb/vendor/github.com/golang/glog/glog.go
new file mode 100644
index 0000000..3e63fff
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/glog.go
@@ -0,0 +1,1177 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// glog.Info("Prepare to repel boarders")
+//
+// glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if glog.V(2) {
+// glog.Info("Starting transaction...")
+// }
+//
+// glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=false
+// Logs are written to standard error instead of to files.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ infoLog severity = iota
+ warningLog
+ errorLog
+ fatalLog
+ numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+ return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+ atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+ return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+ return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+ var threshold severity
+ // Is it a known name?
+ if v, ok := severityByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ threshold = severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+func severityByName(s string) (severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range severityName {
+ if name == s {
+ return severity(i), true
+ }
+ }
+ return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+ infoLog: &Stats.Info,
+ warningLog: &Stats.Warning,
+ errorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.Atoi(patLev[1])
+ if err != nil {
+ return errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ t.line = 0
+ t.file = ""
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+func init() {
+ flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+ flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+ flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ // Default stderrThreshold is ERROR.
+ logging.stderrThreshold = errorLog
+
+ logging.setVState(0, nil, false)
+ go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using sync.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity Level // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ logging.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&logging.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ logging.vmodule.filter = filter
+ logging.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+ logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+ l.freeListMu.Lock()
+ b := l.freeList
+ if b != nil {
+ l.freeList = b.next
+ }
+ l.freeListMu.Unlock()
+ if b == nil {
+ b = new(buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ l.freeListMu.Lock()
+ b.next = l.freeList
+ l.freeList = b
+ l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+ L A single character, representing the log level (eg 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+ now := timeNow()
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > fatalLog {
+ s = infoLog // for safety.
+ }
+ buf := l.getBuffer()
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.tmp[0] = severityChar[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.tmp[21] = ' '
+ buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+ buf.tmp[29] = ' '
+ buf.Write(buf.tmp[:30])
+ buf.WriteString(file)
+ buf.tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.tmp[n+1] = ']'
+ buf.tmp[n+2] = ' '
+ buf.Write(buf.tmp[:n+3])
+ return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+ buf.tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.tmp)
+ for {
+ j--
+ buf.tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintln(buf, args...)
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+ l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintf(buf, format, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number. If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless -logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+ l.mu.Lock()
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ switch s {
+ case fatalLog:
+ l.file[fatalLog].Write(data)
+ fallthrough
+ case errorLog:
+ l.file[errorLog].Write(data)
+ fallthrough
+ case warningLog:
+ l.file[warningLog].Write(data)
+ fallthrough
+ case infoLog:
+ l.file[infoLog].Write(data)
+ }
+ }
+ if s == fatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ // First, make sure we see the trace for the current goroutine on standard error.
+ // If -logtostderr has been specified, the loop below will do that anyway
+ // as the first stack in the full dump.
+ if !l.toStderr {
+ os.Stderr.Write(stacks(false))
+ }
+ // Write the stack trace for all goroutines to the files.
+ trace := stacks(true)
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := fatalLog; log >= infoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ l.putBuffer(buf)
+ l.mu.Unlock()
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+ }
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity
+ nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= MaxSize {
+ if err := sb.rotateFile(time.Now()); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severityName[sb.sev], now)
+ sb.nbytes = 0
+ if err != nil {
+ return err
+ }
+
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one
+ // has already been created, we can stop.
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ }
+ if err := sb.rotateFile(now); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+ for _ = range time.NewTicker(flushInterval).C {
+ l.lockAndFlushAll()
+ }
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := fatalLog; s >= infoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity(lb), file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+// if glog.V(2) { glog.Info("log this") }
+// or
+// glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return Verbose(true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return Verbose(false)
+ }
+ v, ok := logging.vmap[logging.pcs[0]]
+ if !ok {
+ v = logging.setV(logging.pcs[0])
+ }
+ return Verbose(v >= level)
+ }
+ return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v {
+ logging.print(infoLog, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v {
+ logging.println(infoLog, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v {
+ logging.printf(infoLog, format, args...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+ logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+ logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+ logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+ logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(fatalLog, format, args...)
+}
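
Editor's note: for orientation, the sketch below shows how an application would typically drive the exported glog API added above (V, Info/Warning/Error, CopyStandardLogTo, Flush). It is illustrative only, not part of the vendored diff; all identifiers are glog's own exported names plus the standard library.

package main

import (
	"flag"
	stdlog "log"

	"github.com/golang/glog"
)

func main() {
	// glog registers -v, -vmodule, -log_dir, etc. as flags; they must be parsed.
	flag.Parse()
	defer glog.Flush() // flush the buffered log files before exiting

	// Route the standard library's logger into the INFO log as well.
	glog.CopyStandardLogTo("INFO")
	stdlog.Print("this line also lands in the INFO log")

	// Guarded verbose logging: arguments are only evaluated when -v >= 2
	// (or -vmodule matches this source file).
	if glog.V(2) {
		glog.Info("verbose detail, computed only when needed")
	}
	glog.V(2).Infof("equivalent, shorter form: detail %d", 42)

	glog.Warning("warnings go to the WARNING and INFO logs")
	glog.Errorf("errors go to ERROR, WARNING, and INFO: %v", "example")
}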
diff --git a/src/kube2msb/vendor/github.com/golang/glog/glog_file.go b/src/kube2msb/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000..65075d2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+ if *logDir != "" {
+ logDirs = append(logDirs, *logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+)
+
+func init() {
+ h, err := os.Hostname()
+ if err == nil {
+ host = shortHostname(h)
+ }
+
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+
+ // Sanitize userName since it may contain filepath separators on Windows.
+ userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ userName,
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := os.Create(fname)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
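
Editor's note: the naming and placement scheme above determines where kube2msb's glog output ends up on disk. The standalone sketch below mirrors that convention so the file names are easy to recognize; buildLogName and the host/user values are hypothetical stand-ins, not the vendored function itself.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// buildLogName reproduces the file-name convention used by logName above,
// purely for illustration.
func buildLogName(program, host, user, tag string, t time.Time, pid int) string {
	return fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program, host, user, tag,
		t.Year(), t.Month(), t.Day(),
		t.Hour(), t.Minute(), t.Second(), pid)
}

func main() {
	name := buildLogName(filepath.Base(os.Args[0]), "myhost", "builder",
		"INFO", time.Now(), os.Getpid())
	// e.g. kube2msb.myhost.builder.log.INFO.20240101-120000.1234
	fmt.Println(name)
	// Files are written to -log_dir if set, otherwise os.TempDir(); a
	// "<program>.<tag>" symlink is updated to point at the newest file.
}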
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/LICENSE b/src/kube2msb/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile b/src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..f1f0656
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+ make
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e98ddec
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,223 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
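
Editor's note: a minimal sketch of the Clone and Merge semantics defined above. The message type pb.Example and its import path are hypothetical placeholders for any protoc-generated type; proto.Clone, proto.Merge, and proto.String are the package's real exported API. Not part of the vendored diff.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	src := &pb.Example{Name: proto.String("original"), Tags: []string{"a"}}

	// Clone returns a deep copy; mutating the copy leaves src untouched.
	dup := proto.Clone(src).(*pb.Example)
	dup.Tags = append(dup.Tags, "b")

	// Merge sets scalar fields that are set in src and appends repeated
	// fields; it panics if the two messages have different types.
	dst := &pb.Example{Tags: []string{"x"}}
	proto.Merge(dst, src) // dst.Tags == ["x", "a"], *dst.Name == "original"

	fmt.Println(src, dup, dst)
}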
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..5810782
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,867 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
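
Editor's note: to make the wire-format helpers above concrete, here is a small standalone sketch using only the exported decoders/encoders from this file and encode.go below (EncodeVarint, DecodeVarint, Buffer, the zigzag routines). Illustrative only, not part of the vendored diff.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Varint: the base-128 encoding used for int32/int64/uint32/uint64,
	// bool, and enum fields. 300 encodes to the two bytes ac 02.
	enc := proto.EncodeVarint(300)
	fmt.Printf("% x\n", enc) // ac 02

	x, n := proto.DecodeVarint(enc)
	fmt.Println(x, n) // 300 2

	// The same decoders are available statefully on a Buffer, which is what
	// unmarshalType above drives field by field.
	b := proto.NewBuffer(enc)
	v, err := b.DecodeVarint()
	fmt.Println(v, err) // 300 <nil>

	// Zigzag (sint32/sint64) maps ..., -2, -1, 0, 1, 2, ... to 3, 1, 0, 2, 4, ...
	// so small negative numbers still produce short varints.
	neg := int64(-5)
	zb := proto.NewBuffer(nil)
	_ = zb.EncodeZigzag64(uint64(neg))
	d, _ := proto.NewBuffer(zb.Bytes()).DecodeZigzag64()
	fmt.Println(int64(d)) // -5
}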
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..231b074
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1325 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
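+
+// To make the equivalence above concrete: a field declared as
+//	map<string, int32> counts = 4;
+// holding the single entry "a" -> 1 is encoded exactly like a repeated
+// MapFieldEntry message, i.e. as the bytes
+//	0x22 0x05 0x0a 0x01 0x61 0x10 0x01
+// (tag for field 4, entry length 5, then key "a" as field 1 and value 1 as
+// field 2). The field name "counts" is only illustrative.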
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new, addressable reflect.Values matching the map's
+// key and value types, along with structPointers to them suitable for passing
+// to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
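+
+// For example, if the nested encoding turns out to be 300 bytes long, its
+// length varint needs only 2 of the 4 reserved bytes, so the message is
+// shifted 2 bytes left and the buffer trimmed; only lengths of 256 MiB or
+// more (a 5-byte varint) force a shift to the right.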
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
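+
+// In practice this means Marshal does not abort when a required field is
+// unset: it keeps encoding the remaining fields and returns the bytes it
+// produced together with a *RequiredNotSetError, so callers can distinguish
+// that case from a hard failure.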
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f5db1de
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,276 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field)
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
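+
+// Typical use, with a hypothetical generated message type pb.Foo:
+//
+//	m1 := &pb.Foo{Name: proto.String("x")}
+//	m2 := &pb.Foo{Name: proto.String("x")}
+//	proto.Equal(m1, m2) // true
+//
+// unlike reflect.DeepEqual, which can report false for messages Equal treats
+// as the same (e.g. a nil versus zero-length proto3 bytes field, or an
+// extension cached in decoded rather than encoded form).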
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..054f4f1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,399 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
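+
+// Typical use of the extension API, with a hypothetical generated descriptor
+// pb.E_Count (an optional int32 extension) and an extendable message msg:
+//
+//	if err := proto.SetExtension(msg, pb.E_Count, proto.Int32(42)); err != nil { ... }
+//	v, err := proto.GetExtension(msg, pb.E_Count) // v.(*int32) on success
+//	proto.ClearExtension(msg, pb.E_Count)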
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..0de8f8d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,894 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+	index int    // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
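+
+// For illustration: a caller that marshals many messages might reuse a single
+// Buffer instead of the global Marshal, resetting it between messages. This
+// sketch assumes a generated type such as the pb.Test example above and the
+// (*Buffer).Marshal method defined in encode.go; send is a placeholder for
+// the caller's own I/O.
+//
+//	var b proto.Buffer
+//	for _, msg := range msgs { // msgs is a hypothetical []*pb.Test
+//		b.Reset()
+//		if err := b.Marshal(msg); err != nil {
+//			log.Fatal("marshaling error: ", err)
+//		}
+//		send(b.Bytes())
+//	}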
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
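+
+// For illustration: because optional proto2 fields are pointers, these helpers
+// let a caller set a field inline while still distinguishing "unset" (nil)
+// from an explicitly set zero value, e.g. with the pb.Test example above:
+//
+//	t := &pb.Test{
+//		Label: proto.String(""), // explicitly set to the empty string
+//		Type:  proto.Int32(0),   // explicitly set to 0
+//	}
+//	_ = t.GetType() // 0, not the declared default of 77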
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
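+
+// For illustration, with the FOO enum maps from the package example:
+//
+//	proto.EnumName(pb.FOO_name, 17) // "X", assuming FOO_name maps 17 to "X"
+//	proto.EnumName(pb.FOO_name, 42) // "42": no symbolic name registered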
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
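+
+// For illustration, both JSON forms decode to the same value, assuming the
+// FOO_value map from the package example (with "X" mapped to 17):
+//
+//	v1, _ := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO")
+//	v2, _ := proto.UnmarshalJSONEnum(pb.FOO_value, []byte("17"), "FOO")
+//	// v1 == 17 && v2 == 17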
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
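+
+// For illustration, with the pb.Test example above, whose Type field declares
+// the default 77 (Default_Test_Type):
+//
+//	t := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(t)
+//	// t.Type is now set and points at 77; t.Label is left untouched.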
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a slice of the fields,
+ // with its scalar fields set to their proto-declared non-zero default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars and strings.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
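+
+// For illustration, the sorter is used along these lines elsewhere in the
+// package to walk a map field in a deterministic order:
+//
+//	keys := v.MapKeys() // v is a reflect.Value of kind Map
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		_ = v.MapIndex(k) // entries visited in sorted key order
+//	}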
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..e25e01e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
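+
+// For illustration, if e.enc holds a length-delimited field such as
+//
+//	0x0a 0x03 'f' 'o' 'o' // key byte (field 1, WireBytes), length 3, payload
+//
+// then skipVarint(skipVarint(e.enc)) strips the key and length varints and
+// leaves just the payload "foo", which MarshalMessageSet below stores in a
+// _MessageSet_Item.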
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+	}
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..749919d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+		elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(uint64(x))
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..e9be0fe
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *v is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *v to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *v.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..d4531c0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,842 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
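+
+// For illustration, the key preceding each field on the wire is
+// EncodeVarint(uint64(tag)<<3 | uint64(wireType)); e.g. field 1 encoded as
+// length-delimited bytes carries the key byte 1<<3|WireBytes == 0x0a, and the
+// decoder recovers the parts with tag := op >> 3 and wire := op & 7.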
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
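+
+// For illustration, small tags take the slice fast path and large tags fall
+// back to the map:
+//
+//	var m tagMap
+//	m.put(3, 0)    // stored in m.fastTags[3]
+//	m.put(5000, 1) // 5000 >= tagMapFastLimit, stored in m.slowTags
+//	if fi, ok := m.get(3); ok {
+//		_ = fi // 0
+//	}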
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+	s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
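For reference, a minimal sketch (not part of this change) of the struct-tag format that the exported Properties.Parse above handles; the import path assumes the canonical github.com/golang/protobuf/proto package that this vendored copy mirrors:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    )

    func main() {
    	// Same tag syntax Parse documents: "bytes,49,opt,name=foo,def=hello!".
    	p := new(proto.Properties)
    	p.Parse("bytes,49,opt,name=foo,def=hello!")

    	// Wire type, tag number, optionality, original name, and the def= value
    	// (commas after def= are preserved by Parse).
    	fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName, p.Default)
    	// Expected output, roughly: bytes 49 true foo hello!
    }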
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..2336b14
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,751 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
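A minimal usage sketch for the text-marshaling entry points above (MarshalTextString and CompactTextString). The Greeting type is hand-written purely for illustration — protoc-generated code would normally supply the message type — and the import path again assumes the canonical github.com/golang/protobuf/proto package:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    )

    // Greeting stands in for a generated message: exported pointer fields
    // carrying the protobuf struct tags that GetProperties parses.
    type Greeting struct {
    	Name *string `protobuf:"bytes,1,opt,name=name"`
    	Id   *int32  `protobuf:"varint,2,opt,name=id"`
    }

    func (m *Greeting) Reset()         { *m = Greeting{} }
    func (m *Greeting) String() string { return proto.CompactTextString(m) }
    func (*Greeting) ProtoMessage()    {}

    func main() {
    	name, id := "kube2msb", int32(7)
    	g := &Greeting{Name: &name, Id: &id}

    	// Multi-line text form, one "name: value" pair per line.
    	fmt.Print(proto.MarshalTextString(g))

    	// Compact one-line form via the String method above.
    	fmt.Println(g)
    }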
diff --git a/src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..4513232
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,806 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
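And a matching parse sketch for UnmarshalText defined above, reusing the same illustrative hand-written Greeting type (an assumption standing in for generated code):

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    )

    // Greeting again stands in for a generated message type.
    type Greeting struct {
    	Name *string `protobuf:"bytes,1,opt,name=name"`
    	Id   *int32  `protobuf:"varint,2,opt,name=id"`
    }

    func (m *Greeting) Reset()         { *m = Greeting{} }
    func (m *Greeting) String() string { return proto.CompactTextString(m) }
    func (*Greeting) ProtoMessage()    {}

    func main() {
    	var g Greeting
    	// Same "name: value" syntax that MarshalText produces.
    	if err := proto.UnmarshalText(`name: "msb" id: 42`, &g); err != nil {
    		fmt.Println("parse error:", err)
    		return
    	}
    	fmt.Println(*g.Name, *g.Id) // msb 42
    }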
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/LICENSE b/src/kube2msb/vendor/github.com/google/cadvisor/LICENSE
new file mode 100644
index 0000000..97cec18
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/LICENSE
@@ -0,0 +1,190 @@
+ Copyright 2014 The cAdvisor Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go
new file mode 100644
index 0000000..6e7e658
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/container.go
@@ -0,0 +1,583 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "reflect"
+ "time"
+)
+
+type CpuSpec struct {
+ Limit uint64 `json:"limit"`
+ MaxLimit uint64 `json:"max_limit"`
+ Mask string `json:"mask,omitempty"`
+ Quota uint64 `json:"quota,omitempty"`
+ Period uint64 `json:"period,omitempty"`
+}
+
+type MemorySpec struct {
+ // The amount of memory requested. Default is unlimited (-1).
+ // Units: bytes.
+ Limit uint64 `json:"limit,omitempty"`
+
+ // The amount of guaranteed memory. Default is 0.
+ // Units: bytes.
+ Reservation uint64 `json:"reservation,omitempty"`
+
+ // The amount of swap space requested. Default is unlimited (-1).
+ // Units: bytes.
+ SwapLimit uint64 `json:"swap_limit,omitempty"`
+}
+
+type ContainerSpec struct {
+ // Time at which the container was created.
+ CreationTime time.Time `json:"creation_time,omitempty"`
+
+ // Metadata labels associated with this container.
+ Labels map[string]string `json:"labels,omitempty"`
+ // Metadata envs associated with this container. Only whitelisted envs are added.
+ Envs map[string]string `json:"envs,omitempty"`
+
+ HasCpu bool `json:"has_cpu"`
+ Cpu CpuSpec `json:"cpu,omitempty"`
+
+ HasMemory bool `json:"has_memory"`
+ Memory MemorySpec `json:"memory,omitempty"`
+
+ HasNetwork bool `json:"has_network"`
+
+ HasFilesystem bool `json:"has_filesystem"`
+
+ // HasDiskIo when true, indicates that DiskIo stats will be available.
+ HasDiskIo bool `json:"has_diskio"`
+
+ HasCustomMetrics bool `json:"has_custom_metrics"`
+ CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"`
+
+ // Image name used for this container.
+ Image string `json:"image,omitempty"`
+}
+
+// ContainerReference contains enough information to uniquely identify a container.
+type ContainerReference struct {
+ // The container id
+ Id string `json:"id,omitempty"`
+
+ // The absolute name of the container. This is unique on the machine.
+ Name string `json:"name"`
+
+ // Other names by which the container is known within a certain namespace.
+ // This is unique within that namespace.
+ Aliases []string `json:"aliases,omitempty"`
+
+ // Namespace under which the aliases of a container are unique.
+ // An example of a namespace is "docker" for Docker containers.
+ Namespace string `json:"namespace,omitempty"`
+
+ Labels map[string]string `json:"labels,omitempty"`
+}
+
+// Sorts by container name.
+type ContainerReferenceSlice []ContainerReference
+
+func (self ContainerReferenceSlice) Len() int { return len(self) }
+func (self ContainerReferenceSlice) Swap(i, j int) { self[i], self[j] = self[j], self[i] }
+func (self ContainerReferenceSlice) Less(i, j int) bool { return self[i].Name < self[j].Name }
+
+// ContainerInfoRequest is used when users request container info from the REST API.
+// It specifies how much data users want to get about a container.
+type ContainerInfoRequest struct {
+ // Max number of stats to return. Specify -1 for all stats currently available.
+ // Default: 60
+ NumStats int `json:"num_stats,omitempty"`
+
+ // Start time for which to query information.
+ // If omitted, the beginning of time is assumed.
+ Start time.Time `json:"start,omitempty"`
+
+ // End time for which to query information.
+ // If omitted, the current time is assumed.
+ End time.Time `json:"end,omitempty"`
+}
+
+// Returns a ContainerInfoRequest with all default values specified.
+func DefaultContainerInfoRequest() ContainerInfoRequest {
+ return ContainerInfoRequest{
+ NumStats: 60,
+ }
+}
+
+func (self *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool {
+ return self.NumStats == other.NumStats &&
+ self.Start.Equal(other.Start) &&
+ self.End.Equal(other.End)
+}
+
+type ContainerInfo struct {
+ ContainerReference
+
+ // The direct subcontainers of the current container.
+ Subcontainers []ContainerReference `json:"subcontainers,omitempty"`
+
+ // The isolation used in the container.
+ Spec ContainerSpec `json:"spec,omitempty"`
+
+ // Historical statistics gathered from the container.
+ Stats []*ContainerStats `json:"stats,omitempty"`
+}
+
+// TODO(vmarmol): Refactor to not need this equality comparison.
+// ContainerInfo may be (un)marshaled by json or other en/decoder. In that
+// case, the Timestamp field in each stats/sample may not be precisely
+// en/decoded. This will lead to small but acceptable differences between a
+// ContainerInfo and its encode-then-decode version. Eq() is used to compare
+// two ContainerInfos, accepting a small difference (<10ms) in Time fields.
+func (self *ContainerInfo) Eq(b *ContainerInfo) bool {
+
+ // If both self and b are nil, then Eq() returns true
+ if self == nil {
+ return b == nil
+ }
+ if b == nil {
+ return self == nil
+ }
+
+ // For fields other than time.Time, we will compare them precisely.
+ // This requires that slices have the same order.
+ if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) {
+ return false
+ }
+ if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) {
+ return false
+ }
+ if !self.Spec.Eq(&b.Spec) {
+ return false
+ }
+
+ for i, expectedStats := range b.Stats {
+ selfStats := self.Stats[i]
+ if !expectedStats.Eq(selfStats) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (self *ContainerSpec) Eq(b *ContainerSpec) bool {
+ // Creation within 1s of each other.
+ diff := self.CreationTime.Sub(b.CreationTime)
+ if (diff > time.Second) || (diff < -time.Second) {
+ return false
+ }
+
+ if self.HasCpu != b.HasCpu {
+ return false
+ }
+ if !reflect.DeepEqual(self.Cpu, b.Cpu) {
+ return false
+ }
+ if self.HasMemory != b.HasMemory {
+ return false
+ }
+ if !reflect.DeepEqual(self.Memory, b.Memory) {
+ return false
+ }
+ if self.HasNetwork != b.HasNetwork {
+ return false
+ }
+ if self.HasFilesystem != b.HasFilesystem {
+ return false
+ }
+ if self.HasDiskIo != b.HasDiskIo {
+ return false
+ }
+ if self.HasCustomMetrics != b.HasCustomMetrics {
+ return false
+ }
+ return true
+}
+
+func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats {
+ n := len(self.Stats) + 1
+ for i, s := range self.Stats {
+ if s.Timestamp.After(ref) {
+ n = i
+ break
+ }
+ }
+ if n > len(self.Stats) {
+ return nil
+ }
+ return self.Stats[n:]
+}
+
+func (self *ContainerInfo) StatsStartTime() time.Time {
+ var ret time.Time
+ for _, s := range self.Stats {
+ if s.Timestamp.Before(ret) || ret.IsZero() {
+ ret = s.Timestamp
+ }
+ }
+ return ret
+}
+
+func (self *ContainerInfo) StatsEndTime() time.Time {
+ var ret time.Time
+ for i := len(self.Stats) - 1; i >= 0; i-- {
+ s := self.Stats[i]
+ if s.Timestamp.After(ret) {
+ ret = s.Timestamp
+ }
+ }
+ return ret
+}
+
+// This mirrors the kernel's internal structure.
+type LoadStats struct {
+ // Number of sleeping tasks.
+ NrSleeping uint64 `json:"nr_sleeping"`
+
+ // Number of running tasks.
+ NrRunning uint64 `json:"nr_running"`
+
+ // Number of tasks in stopped state
+ NrStopped uint64 `json:"nr_stopped"`
+
+ // Number of tasks in uninterruptible state
+ NrUninterruptible uint64 `json:"nr_uninterruptible"`
+
+ // Number of tasks waiting on IO
+ NrIoWait uint64 `json:"nr_io_wait"`
+}
+
+// CPU usage time statistics.
+type CpuUsage struct {
+ // Total CPU usage.
+ // Units: nanoseconds
+ Total uint64 `json:"total"`
+
+ // Per CPU/core usage of the container.
+ // Unit: nanoseconds.
+ PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
+
+ // Time spent in user space.
+ // Unit: nanoseconds
+ User uint64 `json:"user"`
+
+ // Time spent in kernel space.
+ // Unit: nanoseconds
+ System uint64 `json:"system"`
+}
+
+// All CPU usage metrics are cumulative from the creation of the container
+type CpuStats struct {
+ Usage CpuUsage `json:"usage"`
+ // Smoothed average of number of runnable threads x 1000.
+ // We multiply by a thousand to avoid using floats while preserving precision.
+ // Load is smoothed over the last 10 seconds. Instantaneous value can be read
+ // from LoadStats.NrRunning.
+ LoadAverage int32 `json:"load_average"`
+}
+
+type PerDiskStats struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Stats map[string]uint64 `json:"stats"`
+}
+
+type DiskIoStats struct {
+ IoServiceBytes []PerDiskStats `json:"io_service_bytes,omitempty"`
+ IoServiced []PerDiskStats `json:"io_serviced,omitempty"`
+ IoQueued []PerDiskStats `json:"io_queued,omitempty"`
+ Sectors []PerDiskStats `json:"sectors,omitempty"`
+ IoServiceTime []PerDiskStats `json:"io_service_time,omitempty"`
+ IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"`
+ IoMerged []PerDiskStats `json:"io_merged,omitempty"`
+ IoTime []PerDiskStats `json:"io_time,omitempty"`
+}
+
+type MemoryStats struct {
+ // Current memory usage; this includes all memory regardless of when it was
+ // accessed.
+ // Units: Bytes.
+ Usage uint64 `json:"usage"`
+
+ // Number of bytes of page cache memory.
+ // Units: Bytes.
+ Cache uint64 `json:"cache"`
+
+ // The amount of anonymous and swap cache memory (includes transparent
+ // hugepages).
+ // Units: Bytes.
+ RSS uint64 `json:"rss"`
+
+ // The amount of working set memory; this includes recently accessed memory,
+ // dirty memory, and kernel memory. Working set is <= "usage".
+ // Units: Bytes.
+ WorkingSet uint64 `json:"working_set"`
+
+ Failcnt uint64 `json:"failcnt"`
+
+ ContainerData MemoryStatsMemoryData `json:"container_data,omitempty"`
+ HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
+}
+
+type MemoryStatsMemoryData struct {
+ Pgfault uint64 `json:"pgfault"`
+ Pgmajfault uint64 `json:"pgmajfault"`
+}
+
+type InterfaceStats struct {
+ // The name of the interface.
+ Name string `json:"name"`
+ // Cumulative count of bytes received.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Cumulative count of packets received.
+ RxPackets uint64 `json:"rx_packets"`
+ // Cumulative count of receive errors encountered.
+ RxErrors uint64 `json:"rx_errors"`
+ // Cumulative count of packets dropped while receiving.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Cumulative count of bytes transmitted.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Cumulative count of packets transmitted.
+ TxPackets uint64 `json:"tx_packets"`
+ // Cumulative count of transmit errors encountered.
+ TxErrors uint64 `json:"tx_errors"`
+ // Cumulative count of packets dropped while transmitting.
+ TxDropped uint64 `json:"tx_dropped"`
+}
+
+type NetworkStats struct {
+ InterfaceStats `json:",inline"`
+ Interfaces []InterfaceStats `json:"interfaces,omitempty"`
+ // TCP connection stats (Established, Listen...)
+ Tcp TcpStat `json:"tcp"`
+ // TCP6 connection stats (Established, Listen...)
+ Tcp6 TcpStat `json:"tcp6"`
+}
+
+type TcpStat struct {
+ // Count of TCP connections in state "Established"
+ Established uint64
+ // Count of TCP connections in state "Syn_Sent"
+ SynSent uint64
+ // Count of TCP connections in state "Syn_Recv"
+ SynRecv uint64
+ // Count of TCP connections in state "Fin_Wait1"
+ FinWait1 uint64
+ // Count of TCP connections in state "Fin_Wait2"
+ FinWait2 uint64
+ // Count of TCP connections in state "Time_Wait"
+ TimeWait uint64
+ // Count of TCP connections in state "Close"
+ Close uint64
+ // Count of TCP connections in state "Close_Wait"
+ CloseWait uint64
+ // Count of TCP connections in state "Last_Ack"
+ LastAck uint64
+ // Count of TCP connections in state "Listen"
+ Listen uint64
+ // Count of TCP connections in state "Closing"
+ Closing uint64
+}
+
+type FsStats struct {
+ // The block device name associated with the filesystem.
+ Device string `json:"device,omitempty"`
+
+ // Type of the filesystem.
+ Type string `json:"type"`
+
+ // Number of bytes that can be consumed by the container on this filesystem.
+ Limit uint64 `json:"capacity"`
+
+ // Number of bytes consumed by the container on this filesystem.
+ Usage uint64 `json:"usage"`
+
+ // Base Usage that is consumed by the container's writable layer.
+ // This field is currently only applicable to Docker containers.
+ BaseUsage uint64 `json:"base_usage"`
+
+ // Number of bytes available for non-root user.
+ Available uint64 `json:"available"`
+
+ // Number of available Inodes
+ InodesFree uint64 `json:"inodes_free"`
+
+ // Number of reads completed
+ // This is the total number of reads completed successfully.
+ ReadsCompleted uint64 `json:"reads_completed"`
+
+ // Number of reads merged
+ // Reads and writes which are adjacent to each other may be merged for
+ // efficiency. Thus two 4K reads may become one 8K read before it is
+ // ultimately handed to the disk, and so it will be counted (and queued)
+ // as only one I/O. This field lets you know how often this was done.
+ ReadsMerged uint64 `json:"reads_merged"`
+
+ // Number of sectors read
+ // This is the total number of sectors read successfully.
+ SectorsRead uint64 `json:"sectors_read"`
+
+ // Number of milliseconds spent reading
+ // This is the total number of milliseconds spent by all reads (as
+ // measured from __make_request() to end_that_request_last()).
+ ReadTime uint64 `json:"read_time"`
+
+ // Number of writes completed
+ // This is the total number of writes completed successfully.
+ WritesCompleted uint64 `json:"writes_completed"`
+
+ // Number of writes merged
+ // See the description of reads merged.
+ WritesMerged uint64 `json:"writes_merged"`
+
+ // Number of sectors written
+ // This is the total number of sectors written successfully.
+ SectorsWritten uint64 `json:"sectors_written"`
+
+ // Number of milliseconds spent writing
+ // This is the total number of milliseconds spent by all writes (as
+ // measured from __make_request() to end_that_request_last()).
+ WriteTime uint64 `json:"write_time"`
+
+ // Number of I/Os currently in progress
+ // The only field that should go to zero. Incremented as requests are
+ // given to appropriate struct request_queue and decremented as they finish.
+ IoInProgress uint64 `json:"io_in_progress"`
+
+ // Number of milliseconds spent doing I/Os
+ // This field increases so long as field 9 is nonzero.
+ IoTime uint64 `json:"io_time"`
+
+ // weighted number of milliseconds spent doing I/Os
+ // This field is incremented at each I/O start, I/O completion, I/O
+ // merge, or read of these stats by the number of I/Os in progress
+ // (field 9) times the number of milliseconds spent doing I/O since the
+ // last update of this field. This can provide an easy measure of both
+ // I/O completion time and the backlog that may be accumulating.
+ WeightedIoTime uint64 `json:"weighted_io_time"`
+}
+
+type ContainerStats struct {
+ // The time of this stat point.
+ Timestamp time.Time `json:"timestamp"`
+ Cpu CpuStats `json:"cpu,omitempty"`
+ DiskIo DiskIoStats `json:"diskio,omitempty"`
+ Memory MemoryStats `json:"memory,omitempty"`
+ Network NetworkStats `json:"network,omitempty"`
+
+ // Filesystem statistics
+ Filesystem []FsStats `json:"filesystem,omitempty"`
+
+ // Task load stats
+ TaskStats LoadStats `json:"task_stats,omitempty"`
+
+ // Custom metrics from all collectors.
+ CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"`
+}
+
+func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {
+ // t1 should not be later than t2
+ if t1.After(t2) {
+ t1, t2 = t2, t1
+ }
+ diff := t2.Sub(t1)
+ if diff <= tolerance {
+ return true
+ }
+ return false
+}
+
+const (
+ // 10ms, i.e. 0.01s
+ timePrecision time.Duration = 10 * time.Millisecond
+)
+
+// This function is useful because we do not require precise time
+// representation.
+func (a *ContainerStats) Eq(b *ContainerStats) bool {
+ if !timeEq(a.Timestamp, b.Timestamp, timePrecision) {
+ return false
+ }
+ return a.StatsEq(b)
+}
+
+// Checks equality of the stats values.
+func (a *ContainerStats) StatsEq(b *ContainerStats) bool {
+ // TODO(vmarmol): Consider using this through reflection.
+ if !reflect.DeepEqual(a.Cpu, b.Cpu) {
+ return false
+ }
+ if !reflect.DeepEqual(a.Memory, b.Memory) {
+ return false
+ }
+ if !reflect.DeepEqual(a.DiskIo, b.DiskIo) {
+ return false
+ }
+ if !reflect.DeepEqual(a.Network, b.Network) {
+ return false
+ }
+ if !reflect.DeepEqual(a.Filesystem, b.Filesystem) {
+ return false
+ }
+ return true
+}
+
+// Event contains information general to events such as the time at which they
+// occurred, their specific type, and the actual event. Event types are
+// differentiated by the EventType field of Event.
+type Event struct {
+ // the absolute container name for which the event occurred
+ ContainerName string `json:"container_name"`
+
+ // the time at which the event occurred
+ Timestamp time.Time `json:"timestamp"`
+
+ // the type of event. EventType is an enumerated type
+ EventType EventType `json:"event_type"`
+
+ // the original event object and all of its extraneous data, ex. an
+ // OomInstance
+ EventData EventData `json:"event_data,omitempty"`
+}
+
+// EventType is an enumerated type which lists the categories under which
+// events may fall. The Event field EventType is populated by this enum.
+type EventType string
+
+const (
+ EventOom EventType = "oom"
+ EventOomKill = "oomKill"
+ EventContainerCreation = "containerCreation"
+ EventContainerDeletion = "containerDeletion"
+)
+
+// Extra information about an event. Only one type will be set.
+type EventData struct {
+ // Information about an OOM kill event.
+ OomKill *OomKillEventData `json:"oom,omitempty"`
+}
+
+// Information related to an OOM kill instance
+type OomKillEventData struct {
+ // process id of the killed process
+ Pid int `json:"pid"`
+
+ // The name of the killed process
+ ProcessName string `json:"process_name"`
+}
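
The request defaults and the tolerant Eq comparison documented above can be exercised directly. A minimal sketch, assuming the conventional import path github.com/google/cadvisor/info/v1 (not shown in this hunk) and made-up timestamps:

    package main

    import (
        "fmt"
        "time"

        v1 "github.com/google/cadvisor/info/v1" // assumed import path
    )

    func main() {
        // Start from the default request (NumStats: 60) and narrow the time window.
        req := v1.DefaultContainerInfoRequest()
        req.Start = time.Now().Add(-10 * time.Minute)
        req.End = time.Now()
        fmt.Println(req.Equals(v1.DefaultContainerInfoRequest())) // false: the window differs

        // ContainerStats.Eq tolerates timestamp drift below 10ms (timePrecision).
        now := time.Now()
        a := &v1.ContainerStats{Timestamp: now}
        b := &v1.ContainerStats{Timestamp: now.Add(5 * time.Millisecond)}
        fmt.Println(a.Eq(b)) // true: within tolerance, and the remaining stats are equal
    }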
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go
new file mode 100644
index 0000000..2703c53
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/docker.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Types used for docker containers.
+package v1
+
+type DockerStatus struct {
+ Version string `json:"version"`
+ KernelVersion string `json:"kernel_version"`
+ OS string `json:"os"`
+ Hostname string `json:"hostname"`
+ RootDir string `json:"root_dir"`
+ Driver string `json:"driver"`
+ DriverStatus map[string]string `json:"driver_status"`
+ ExecDriver string `json:"exec_driver"`
+ NumImages int `json:"num_images"`
+ NumContainers int `json:"num_containers"`
+}
+
+type DockerImage struct {
+ ID string `json:"id"`
+ RepoTags []string `json:"repo_tags"` // repository name and tags.
+ Created int64 `json:"created"` // unix time since creation.
+ VirtualSize int64 `json:"virtual_size"`
+ Size int64 `json:"size"`
+}
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go
new file mode 100644
index 0000000..74a5df4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/machine.go
@@ -0,0 +1,205 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+type FsInfo struct {
+ // Block device associated with the filesystem.
+ Device string `json:"device"`
+
+ // Total number of bytes available on the filesystem.
+ Capacity uint64 `json:"capacity"`
+
+ // Type of device.
+ Type string `json:"type"`
+
+ // Total number of inodes available on the filesystem.
+ Inodes uint64 `json:"inodes"`
+}
+
+type Node struct {
+ Id int `json:"node_id"`
+ // Per-node memory
+ Memory uint64 `json:"memory"`
+ Cores []Core `json:"cores"`
+ Caches []Cache `json:"caches"`
+}
+
+type Core struct {
+ Id int `json:"core_id"`
+ Threads []int `json:"thread_ids"`
+ Caches []Cache `json:"caches"`
+}
+
+type Cache struct {
+ // Size of memory cache in bytes.
+ Size uint64 `json:"size"`
+ // Type of memory cache: data, instruction, or unified.
+ Type string `json:"type"`
+ // Level (distance from cpus) in a multi-level cache hierarchy.
+ Level int `json:"level"`
+}
+
+func (self *Node) FindCore(id int) (bool, int) {
+ for i, n := range self.Cores {
+ if n.Id == id {
+ return true, i
+ }
+ }
+ return false, -1
+}
+
+func (self *Node) AddThread(thread int, core int) {
+ var coreIdx int
+ if core == -1 {
+ // Assume one hyperthread per core when topology data is missing.
+ core = thread
+ }
+ ok, coreIdx := self.FindCore(core)
+
+ if !ok {
+ // New core
+ core := Core{Id: core}
+ self.Cores = append(self.Cores, core)
+ coreIdx = len(self.Cores) - 1
+ }
+ self.Cores[coreIdx].Threads = append(self.Cores[coreIdx].Threads, thread)
+}
+
+func (self *Node) AddNodeCache(c Cache) {
+ self.Caches = append(self.Caches, c)
+}
+
+func (self *Node) AddPerCoreCache(c Cache) {
+ for idx := range self.Cores {
+ self.Cores[idx].Caches = append(self.Cores[idx].Caches, c)
+ }
+}
+
+type DiskInfo struct {
+ // device name
+ Name string `json:"name"`
+
+ // Major number
+ Major uint64 `json:"major"`
+
+ // Minor number
+ Minor uint64 `json:"minor"`
+
+ // Size in bytes
+ Size uint64 `json:"size"`
+
+ // I/O Scheduler - one of "none", "noop", "cfq", "deadline"
+ Scheduler string `json:"scheduler"`
+}
+
+type NetInfo struct {
+ // Device name
+ Name string `json:"name"`
+
+ // Mac Address
+ MacAddress string `json:"mac_address"`
+
+ // Speed in MBits/s
+ Speed int64 `json:"speed"`
+
+ // Maximum Transmission Unit
+ Mtu int64 `json:"mtu"`
+}
+
+type CloudProvider string
+
+const (
+ GCE CloudProvider = "GCE"
+ AWS = "AWS"
+ Azure = "Azure"
+ Baremetal = "Baremetal"
+ UnknownProvider = "Unknown"
+)
+
+type InstanceType string
+
+const (
+ NoInstance InstanceType = "None"
+ UnknownInstance = "Unknown"
+)
+
+type InstanceID string
+
+const (
+ UnNamedInstance InstanceID = "None"
+)
+
+type MachineInfo struct {
+ // The number of cores in this machine.
+ NumCores int `json:"num_cores"`
+
+ // Maximum clock speed for the cores, in KHz.
+ CpuFrequency uint64 `json:"cpu_frequency_khz"`
+
+ // The amount of memory (in bytes) in this machine
+ MemoryCapacity uint64 `json:"memory_capacity"`
+
+ // The machine id
+ MachineID string `json:"machine_id"`
+
+ // The system uuid
+ SystemUUID string `json:"system_uuid"`
+
+ // The boot id
+ BootID string `json:"boot_id"`
+
+ // Filesystems on this machine.
+ Filesystems []FsInfo `json:"filesystems"`
+
+ // Disk map
+ DiskMap map[string]DiskInfo `json:"disk_map"`
+
+ // Network devices
+ NetworkDevices []NetInfo `json:"network_devices"`
+
+ // Machine Topology
+ // Describes cpu/memory layout and hierarchy.
+ Topology []Node `json:"topology"`
+
+ // Cloud provider the machine belongs to.
+ CloudProvider CloudProvider `json:"cloud_provider"`
+
+ // Type of cloud instance (e.g. GCE standard) the machine is.
+ InstanceType InstanceType `json:"instance_type"`
+
+ // ID of cloud instance (e.g. instance-1) given to it by the cloud provider.
+ InstanceID InstanceID `json:"instance_id"`
+}
+
+type VersionInfo struct {
+ // Kernel version.
+ KernelVersion string `json:"kernel_version"`
+
+ // OS image being used for the cAdvisor container, or the host image if running directly on the host.
+ ContainerOsVersion string `json:"container_os_version"`
+
+ // Docker version.
+ DockerVersion string `json:"docker_version"`
+
+ // cAdvisor version.
+ CadvisorVersion string `json:"cadvisor_version"`
+ // cAdvisor git revision.
+ CadvisorRevision string `json:"cadvisor_revision"`
+}
+
+type MachineInfoFactory interface {
+ GetMachineInfo() (*MachineInfo, error)
+ GetVersionInfo() (*VersionInfo, error)
+}
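
The Node helpers above build the CPU topology incrementally, including the core == -1 fallback noted in AddThread. A small sketch under the same assumed import path:

    package main

    import (
        "fmt"

        v1 "github.com/google/cadvisor/info/v1" // assumed import path
    )

    func main() {
        node := v1.Node{Id: 0}

        // Two hyperthreads reported for physical core 0.
        node.AddThread(0, 0)
        node.AddThread(2, 0)

        // Unknown core id (-1): one core is assumed per thread, keyed by the thread id.
        node.AddThread(5, -1)

        // Attach an L2 cache entry to every core seen so far.
        node.AddPerCoreCache(v1.Cache{Size: 256 * 1024, Type: "Unified", Level: 2})

        found, idx := node.FindCore(0)
        fmt.Println(found, node.Cores[idx].Threads) // true [0 2]
        fmt.Println(len(node.Cores))                // 2
    }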
diff --git a/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go
new file mode 100644
index 0000000..90fd9e4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/cadvisor/info/v1/metric.go
@@ -0,0 +1,79 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "time"
+)
+
+// Type of metric being exported.
+type MetricType string
+
+const (
+ // Instantaneous value. May increase or decrease.
+ MetricGauge MetricType = "gauge"
+
+ // A counter-like value that is only expected to increase.
+ MetricCumulative = "cumulative"
+
+ // Rate over a time period.
+ MetricDelta = "delta"
+)
+
+// DataType for metric being exported.
+type DataType string
+
+const (
+ IntType DataType = "int"
+ FloatType = "float"
+)
+
+// Spec for custom metric.
+type MetricSpec struct {
+ // The name of the metric.
+ Name string `json:"name"`
+
+ // Type of the metric.
+ Type MetricType `json:"type"`
+
+ // Data Type for the stats.
+ Format DataType `json:"format"`
+
+ // Display Units for the stats.
+ Units string `json:"units"`
+}
+
+// An exported metric.
+type MetricValBasic struct {
+ // Time at which the metric was queried
+ Timestamp time.Time `json:"timestamp"`
+
+ // The value of the metric at this point.
+ IntValue int64 `json:"int_value,omitempty"`
+ FloatValue float64 `json:"float_value,omitempty"`
+}
+
+// An exported metric.
+type MetricVal struct {
+ // Label associated with a metric
+ Label string `json:"label,omitempty"`
+
+ // Time at which the metric was queried
+ Timestamp time.Time `json:"timestamp"`
+
+ // The value of the metric at this point.
+ IntValue int64 `json:"int_value,omitempty"`
+ FloatValue float64 `json:"float_value,omitempty"`
+}
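
Custom metrics pair one MetricSpec (name, type, data format, units) with per-label MetricVal samples, mirroring ContainerStats.CustomMetrics in container.go. A hedged sketch with the same assumed import path and invented values:

    package main

    import (
        "fmt"
        "time"

        v1 "github.com/google/cadvisor/info/v1" // assumed import path
    )

    func main() {
        spec := v1.MetricSpec{
            Name:   "qps",
            Type:   v1.MetricGauge,
            Format: v1.FloatType,
            Units:  "requests/s",
        }

        // Samples are grouped by metric name, then labelled per series.
        samples := map[string][]v1.MetricVal{
            spec.Name: {
                {Label: "GET", Timestamp: time.Now(), FloatValue: 123.4},
            },
        }
        fmt.Println(spec.Name, samples[spec.Name][0].FloatValue) // qps 123.4
    }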
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 0000000..51cf5cd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Go makes it very simple to ensure properly formatted code, so always run
+ `go fmt` on your code before committing it. You should also run
+ [golint][] over your code. As noted in the [golint readme][], it's not
+ strictly necessary that your code be completely "lint-free", but this will
+ help you find common style issues.
+
+ 1. Any significant changes should almost always be accompanied by tests. The
+ project already has good test coverage, so look at some of the existing
+ tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+ are invaluable tools for seeing which parts of your code aren't being
+ exercised by your tests.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/LICENSE b/src/kube2msb/vendor/github.com/google/gofuzz/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/README.md b/src/kube2msb/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 0000000..68fcf2c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,71 @@
+gofuzz
+======
+
+gofuzz is a library for populating go objects with random values.
+
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/unserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
+You can use it on single variables:
+```
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+ A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```
+type MyEnum string
+const (
+ A MyEnum = "A"
+ B MyEnum = "B"
+)
+type MyInfo struct {
+ Type MyEnum
+ AInfo *string
+ BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+ func(e *MyInfo, c fuzz.Continue) {
+ switch c.Intn(2) {
+ case 0:
+ e.Type = A
+ c.Fuzz(&e.AInfo)
+ case 1:
+ e.Type = B
+ c.Fuzz(&e.BInfo)
+ }
+ },
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
+
+See more examples in ```example_test.go```.
+
+Happy testing!
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/doc.go b/src/kube2msb/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 0000000..9f9956d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating go objects with random values.
+package fuzz
diff --git a/src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go b/src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 0000000..42d9a48
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,446 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "time"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+ fuzzFuncs fuzzFuncMap
+ defaultFuzzFuncs fuzzFuncMap
+ r *rand.Rand
+ nilChance float64
+ minElements int
+ maxElements int
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+ f := &Fuzzer{
+ defaultFuzzFuncs: fuzzFuncMap{
+ reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+ },
+
+ fuzzFuncs: fuzzFuncMap{},
+ r: rand.New(rand.NewSource(time.Now().UnixNano())),
+ nilChance: .2,
+ minElements: 1,
+ maxElements: 10,
+ }
+ return f
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
+ for i := range fuzzFuncs {
+ v := reflect.ValueOf(fuzzFuncs[i])
+ if v.Kind() != reflect.Func {
+ panic("Need only funcs!")
+ }
+ t := v.Type()
+ if t.NumIn() != 2 || t.NumOut() != 0 {
+ panic("Need 2 in and 0 out params!")
+ }
+ argT := t.In(0)
+ switch argT.Kind() {
+ case reflect.Ptr, reflect.Map:
+ default:
+ panic("fuzzFunc must take pointer or map type")
+ }
+ if t.In(1) != reflect.TypeOf(Continue{}) {
+ panic("fuzzFunc's second parameter must be type fuzz.Continue")
+ }
+ f.fuzzFuncs[argT] = v
+ }
+ return f
+}
+
+// RandSource causes f to get values from the given source of randomness.
+// Use if you want deterministic fuzzing.
+func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
+ f.r = rand.New(s)
+ return f
+}
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Fuzzer) NilChance(p float64) *Fuzzer {
+ if p < 0 || p > 1 {
+ panic("p should be between 0 and 1, inclusive.")
+ }
+ f.nilChance = p
+ return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
+ if atLeast > atMost {
+ panic("atLeast must be <= atMost")
+ }
+ if atLeast < 0 {
+ panic("atLeast must be >= 0")
+ }
+ f.minElements = atLeast
+ f.maxElements = atMost
+ return f
+}
+
+func (f *Fuzzer) genElementCount() int {
+ if f.minElements == f.maxElements {
+ return f.minElements
+ }
+ return f.minElements + f.r.Intn(f.maxElements-f.minElements)
+}
+
+func (f *Fuzzer) genShouldFill() bool {
+ return f.r.Float64() > f.nilChance
+}
+
+// Fuzz recursively fills all of obj's fields with something random. First
+// this tries to find a custom fuzz function (see Funcs). If there is no
+// custom function this tests whether the object implements fuzz.Interface and,
+// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
+// there is a default fuzz function provided by this package. If all of that
+// fails, this will generate random values for all primitive fields and then
+// recurse for all non-primitives.
+//
+// Not safe for cyclic or tree-like structs!
+//
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.doFuzz(v, 0)
+}
+
+// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+// Not safe for cyclic or tree-like structs!
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.doFuzz(v, flagNoCustomFuzz)
+}
+
+const (
+ // Do not try to find a custom fuzz function. Does not apply recursively.
+ flagNoCustomFuzz uint64 = 1 << iota
+)
+
+func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
+ if !v.CanSet() {
+ return
+ }
+
+ if flags&flagNoCustomFuzz == 0 {
+ // Check for both pointer and non-pointer custom functions.
+ if v.CanAddr() && f.tryCustom(v.Addr()) {
+ return
+ }
+ if f.tryCustom(v) {
+ return
+ }
+ }
+
+ if fn, ok := fillFuncMap[v.Kind()]; ok {
+ fn(v, f.r)
+ return
+ }
+ switch v.Kind() {
+ case reflect.Map:
+ if f.genShouldFill() {
+ v.Set(reflect.MakeMap(v.Type()))
+ n := f.genElementCount()
+ for i := 0; i < n; i++ {
+ key := reflect.New(v.Type().Key()).Elem()
+ f.doFuzz(key, 0)
+ val := reflect.New(v.Type().Elem()).Elem()
+ f.doFuzz(val, 0)
+ v.SetMapIndex(key, val)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Ptr:
+ if f.genShouldFill() {
+ v.Set(reflect.New(v.Type().Elem()))
+ f.doFuzz(v.Elem(), 0)
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Slice:
+ if f.genShouldFill() {
+ n := f.genElementCount()
+ v.Set(reflect.MakeSlice(v.Type(), n, n))
+ for i := 0; i < n; i++ {
+ f.doFuzz(v.Index(i), 0)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ f.doFuzz(v.Field(i), 0)
+ }
+ case reflect.Array:
+ fallthrough
+ case reflect.Chan:
+ fallthrough
+ case reflect.Func:
+ fallthrough
+ case reflect.Interface:
+ fallthrough
+ default:
+ panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
+ }
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (f *Fuzzer) tryCustom(v reflect.Value) bool {
+ // First: see if we have a fuzz function for it.
+ doCustom, ok := f.fuzzFuncs[v.Type()]
+ if !ok {
+ // Second: see if it can fuzz itself.
+ if v.CanInterface() {
+ intf := v.Interface()
+ if fuzzable, ok := intf.(Interface); ok {
+ fuzzable.Fuzz(Continue{f: f, Rand: f.r})
+ return true
+ }
+ }
+ // Finally: see if there is a default fuzz function.
+ doCustom, ok = f.defaultFuzzFuncs[v.Type()]
+ if !ok {
+ return false
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Ptr:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ case reflect.Map:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ default:
+ return false
+ }
+
+ doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+ f: f,
+ Rand: f.r,
+ })})
+ return true
+}
+
+// Interface represents an object that knows how to fuzz itself. Any time we
+// find a type that implements this interface we will delegate the act of
+// fuzzing to the object itself.
+type Interface interface {
+ Fuzz(c Continue)
+}
+
+// Continue can be passed to custom fuzzing functions to allow them to use
+// the correct source of randomness and to continue fuzzing their members.
+type Continue struct {
+ f *Fuzzer
+
+ // For convenience, Continue implements rand.Rand via embedding.
+ // Use this for generating any randomness if you want your fuzzing
+ // to be repeatable for a given seed.
+ *rand.Rand
+}
+
+// Fuzz continues fuzzing obj. obj must be a pointer.
+func (c Continue) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.f.doFuzz(v, 0)
+}
+
+// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+func (c Continue) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.f.doFuzz(v, flagNoCustomFuzz)
+}
+
+// RandString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func (c Continue) RandString() string {
+ return randString(c.Rand)
+}
+
+// RandUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) RandUint64() uint64 {
+ return randUint64(c.Rand)
+}
+
+// RandBool returns true or false randomly.
+func (c Continue) RandBool() bool {
+ return randBool(c.Rand)
+}
+
+func fuzzInt(v reflect.Value, r *rand.Rand) {
+ v.SetInt(int64(randUint64(r)))
+}
+
+func fuzzUint(v reflect.Value, r *rand.Rand) {
+ v.SetUint(randUint64(r))
+}
+
+func fuzzTime(t *time.Time, c Continue) {
+ var sec, nsec int64
+ // Allow for about 1000 years of random time values, which keeps things
+ // like JSON parsing reasonably happy.
+ sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+ c.Fuzz(&nsec)
+ *t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+ reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+ v.SetBool(randBool(r))
+ },
+ reflect.Int: fuzzInt,
+ reflect.Int8: fuzzInt,
+ reflect.Int16: fuzzInt,
+ reflect.Int32: fuzzInt,
+ reflect.Int64: fuzzInt,
+ reflect.Uint: fuzzUint,
+ reflect.Uint8: fuzzUint,
+ reflect.Uint16: fuzzUint,
+ reflect.Uint32: fuzzUint,
+ reflect.Uint64: fuzzUint,
+ reflect.Uintptr: fuzzUint,
+ reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(float64(r.Float32()))
+ },
+ reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(r.Float64())
+ },
+ reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+ reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+ reflect.String: func(v reflect.Value, r *rand.Rand) {
+ v.SetString(randString(r))
+ },
+ reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+ if r.Int()&1 == 1 {
+ return true
+ }
+ return false
+}
+
+type charRange struct {
+ first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose(rand *rand.Rand) rune {
+ count := int64(r.last - r.first)
+ return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+ {' ', '~'}, // ASCII characters
+ {'\u00a0', '\u02af'}, // Multi-byte encoded characters
+ {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ runes := make([]rune, n)
+ for i := range runes {
+ runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
+ }
+ return string(runes)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+ return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/LICENSE b/src/kube2msb/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 0000000..6866802
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/README.md b/src/kube2msb/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 0000000..cdcea0f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,68 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
+
+![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
+
+## Status
+
+It is ready for production use. It works fine, although it could use more testing. Here are some projects in the wild using Mergo:
+
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+
+[![Build Status][1]][2]
+[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo)
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+
+## Installation
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge structs of the same type (with exported fields initialized as the zero value of their type) and maps of the same type. Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to a map, it won't be done recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will just be assigned as values.
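+
+In the map-to-struct direction, a minimal sketch (the config type, keys and values here are illustrative, not taken from the Mergo docs) looks like this:
+
+    type config struct {
+        Host string
+        Port int
+    }
+
+    var dst config
+    defaults := map[string]interface{}{"host": "localhost", "port": 8080}
+
+    if err := mergo.Map(&dst, defaults); err != nil {
+        // ...
+    }
+    // dst.Host is now "localhost" and dst.Port is 8080; note the lowercase
+    // map keys, which Map capitalizes to find the exported fields.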
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v1
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/doc.go b/src/kube2msb/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 0000000..6e9aa7b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+ type networkConfig struct {
+ Protocol string
+ Address string
+	ServerType string `json:"server_type"`
+ Port uint16
+ }
+
+ type FssnConfig struct {
+ Network networkConfig
+ }
+
+ var fssnDefault = FssnConfig {
+ networkConfig {
+ "tcp",
+ "127.0.0.1",
+ "http",
+ 31560,
+ },
+ }
+
+ // Inside a function [...]
+
+ if err := mergo.Merge(&config, fssnDefault); err != nil {
+ log.Fatal(err)
+ }
+
+ // More code [...]
+
+*/
+package mergo
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/map.go b/src/kube2msb/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 0000000..44361e8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,146 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
+ return
+ }
+ } else {
+ if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to a struct. If src is a
+// struct, dst must be a map[string]interface{}.
+// It won't merge unexported (private) fields and will recursively merge
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps
+// sane semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0)
+}
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/merge.go b/src/kube2msb/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 0000000..5d328b1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,99 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "reflect"
+)
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ switch dst.Kind() {
+ case reflect.Struct:
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil {
+ return
+ }
+ }
+ case reflect.Map:
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Map:
+ if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
+ return
+ }
+ }
+ if !dstElement.IsValid() {
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if src.IsNil() {
+ break
+ } else if dst.IsNil() {
+ if dst.CanSet() && isEmptyValue(dst) {
+ dst.Set(src)
+ }
+ } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil {
+ return
+ }
+ default:
+ if dst.CanSet() && !isEmptyValue(src) {
+ dst.Set(src)
+ }
+ }
+ return
+}
+
+// Merge sets fields' values in dst from src if they have a zero
+// value of their type.
+// dst and src must be valid same-type structs and dst must be
+// a pointer to struct.
+// It won't merge unexported (private) fields and will recursively merge
+// any exported field.
+func Merge(dst, src interface{}) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
+}
diff --git a/src/kube2msb/vendor/github.com/imdario/mergo/mergo.go b/src/kube2msb/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 0000000..f8a0991
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it re-encounters them.
+// Visited entries are stored in a map indexed by 17 * the
+// destination address.
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ return // TODO refactor
+}
diff --git a/src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE b/src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE
new file mode 100644
index 0000000..5c304d1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/jonboulle/clockwork/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md b/src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md
new file mode 100644
index 0000000..d43a6c7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/jonboulle/clockwork/README.md
@@ -0,0 +1,61 @@
+clockwork
+=========
+
+[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
+[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork)
+
+a simple fake clock for golang
+
+# Usage
+
+Replace uses of the `time` package with the `clockwork.Clock` interface.
+
+For example, instead of using `time.Sleep` directly:
+
+```
+func my_func() {
+ time.Sleep(3 * time.Second)
+ do_something()
+}
+```
+
+inject a clock and use its `Sleep` method instead:
+
+```
+func my_func(clock clockwork.Clock) {
+ clock.Sleep(3 * time.Second)
+ do_something()
+}
+```
+
+Now you can easily test `my_func` with a `FakeClock`:
+
+```
+func TestMyFunc(t *testing.T) {
+	c := clockwork.NewFakeClock()
+
+	// Start our sleepy function in a goroutine so the test can observe it
+	done := make(chan struct{})
+	go func() {
+		my_func(c)
+		close(done)
+	}()
+
+	// Ensure we wait until my_func is sleeping on the fake clock
+	c.BlockUntil(1)
+
+	assert_state()
+
+	// Advance the FakeClock past the sleep duration so my_func wakes up
+	c.Advance(3 * time.Second)
+
+	<-done
+	assert_state()
+}
+```
+
+and in production builds, simply inject the real clock instead:
+```
+my_func(clockwork.NewRealClock())
+```
+
+See [example_test.go](example_test.go) for a full example.
+
+# Credits
+
+clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking%20time)
diff --git a/src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go b/src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go
new file mode 100644
index 0000000..1f1045b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/jonboulle/clockwork/clockwork.go
@@ -0,0 +1,164 @@
+package clockwork
+
+import (
+ "sync"
+ "time"
+)
+
+// Clock provides an interface that packages can use instead of directly
+// using the time package, so that time-dependent behavior can be tested.
+type Clock interface {
+ After(d time.Duration) <-chan time.Time
+ Sleep(d time.Duration)
+ Now() time.Time
+}
+
+// FakeClock provides an interface for a clock which can be
+// manually advanced through time
+type FakeClock interface {
+ Clock
+ // Advance advances the FakeClock to a new point in time, ensuring any existing
+ // sleepers are notified appropriately before returning
+ Advance(d time.Duration)
+ // BlockUntil will block until the FakeClock has the given number of
+ // sleepers (callers of Sleep or After)
+ BlockUntil(n int)
+}
+
+// NewRealClock returns a Clock which simply delegates calls to the actual time
+// package; it should be used by packages in production.
+func NewRealClock() Clock {
+ return &realClock{}
+}
+
+// NewFakeClock returns a FakeClock implementation which can be
+// manually advanced through time for testing.
+func NewFakeClock() FakeClock {
+ return &fakeClock{
+ l: sync.RWMutex{},
+
+ // use a fixture that does not fulfill Time.IsZero()
+ time: time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC),
+ }
+}
+
+type realClock struct{}
+
+func (rc *realClock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+func (rc *realClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+func (rc *realClock) Now() time.Time {
+ return time.Now()
+}
+
+type fakeClock struct {
+ sleepers []*sleeper
+ blockers []*blocker
+ time time.Time
+
+ l sync.RWMutex
+}
+
+// sleeper represents a caller of After or Sleep
+type sleeper struct {
+ until time.Time
+ done chan time.Time
+}
+
+// blocker represents a caller of BlockUntil
+type blocker struct {
+ count int
+ ch chan struct{}
+}
+
+// After mimics time.After; it waits for the given duration to elapse on the
+// fakeClock, then sends the current time on the returned channel.
+func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ now := fc.time
+ done := make(chan time.Time, 1)
+ if d.Nanoseconds() == 0 {
+ // special case - trigger immediately
+ done <- now
+ } else {
+ // otherwise, add to the set of sleepers
+ s := &sleeper{
+ until: now.Add(d),
+ done: done,
+ }
+ fc.sleepers = append(fc.sleepers, s)
+ // and notify any blockers
+ fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+ }
+ return done
+}
+
+// notifyBlockers notifies all the blockers waiting until the
+// given number of sleepers are waiting on the fakeClock. It
+// returns an updated slice of blockers (i.e. those still waiting)
+func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
+ for _, b := range blockers {
+ if b.count == count {
+ close(b.ch)
+ } else {
+ newBlockers = append(newBlockers, b)
+ }
+ }
+ return
+}
+
+// Sleep blocks until the given duration has passed on the fakeClock
+func (fc *fakeClock) Sleep(d time.Duration) {
+ <-fc.After(d)
+}
+
+// Now returns the current time of the fakeClock
+func (fc *fakeClock) Now() time.Time {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ return fc.time
+}
+
+// Advance advances fakeClock to a new point in time, ensuring channels from any
+// previous invocations of After are notified appropriately before returning
+func (fc *fakeClock) Advance(d time.Duration) {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ end := fc.time.Add(d)
+ var newSleepers []*sleeper
+ for _, s := range fc.sleepers {
+ if end.Sub(s.until) >= 0 {
+ s.done <- end
+ } else {
+ newSleepers = append(newSleepers, s)
+ }
+ }
+ fc.sleepers = newSleepers
+ fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+ fc.time = end
+}
+
+// BlockUntil will block until the fakeClock has the given number of sleepers
+// (callers of Sleep or After)
+func (fc *fakeClock) BlockUntil(n int) {
+ fc.l.Lock()
+ // Fast path: current number of sleepers is what we're looking for
+ if len(fc.sleepers) == n {
+ fc.l.Unlock()
+ return
+ }
+ // Otherwise, set up a new blocker
+ b := &blocker{
+ count: n,
+ ch: make(chan struct{}),
+ }
+ fc.blockers = append(fc.blockers, b)
+ fc.l.Unlock()
+ <-b.ch
+}
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE b/src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/README.md b/src/kube2msb/vendor/github.com/juju/ratelimit/README.md
new file mode 100644
index 0000000..a0fdfe2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/README.md
@@ -0,0 +1,117 @@
+# ratelimit
+--
+ import "github.com/juju/ratelimit"
+
+The ratelimit package provides an efficient token bucket implementation. See
+http://en.wikipedia.org/wiki/Token_bucket.
+
+## Usage
+
+#### func Reader
+
+```go
+func Reader(r io.Reader, bucket *Bucket) io.Reader
+```
+Reader returns a reader that is rate limited by the given token bucket. Each
+token in the bucket represents one byte.
+
+#### func Writer
+
+```go
+func Writer(w io.Writer, bucket *Bucket) io.Writer
+```
+Writer returns a writer that is rate limited by the given token bucket. Each
+token in the bucket represents one byte.
+
+#### type Bucket
+
+```go
+type Bucket struct {
+}
+```
+
+Bucket represents a token bucket that fills at a predetermined rate. Methods on
+Bucket may be called concurrently.
+
+#### func NewBucket
+
+```go
+func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
+```
+NewBucket returns a new token bucket that fills at the rate of one token every
+fillInterval, up to the given maximum capacity. Both arguments must be positive.
+The bucket is initially full.
+
+#### func NewBucketWithQuantum
+
+```go
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
+```
+NewBucketWithQuantum is similar to NewBucket, but allows the specification of
+the quantum size - quantum tokens are added every fillInterval.
+
+#### func NewBucketWithRate
+
+```go
+func NewBucketWithRate(rate float64, capacity int64) *Bucket
+```
+NewBucketWithRate returns a token bucket that fills the bucket at the rate of
+rate tokens per second up to the given maximum capacity. Because of limited
+clock resolution, at high rates, the actual rate may be up to 1% different from
+the specified rate.
+
+#### func (*Bucket) Rate
+
+```go
+func (tb *Bucket) Rate() float64
+```
+Rate returns the fill rate of the bucket, in tokens per second.
+
+#### func (*Bucket) Take
+
+```go
+func (tb *Bucket) Take(count int64) time.Duration
+```
+Take takes count tokens from the bucket without blocking. It returns the time
+that the caller should wait until the tokens are actually available.
+
+Note that the request is irrevocable: there is no way to return tokens to
+the bucket once this method commits us to taking them.
+
+#### func (*Bucket) TakeAvailable
+
+```go
+func (tb *Bucket) TakeAvailable(count int64) int64
+```
+TakeAvailable takes up to count immediately available tokens from the bucket. It
+returns the number of tokens removed, or zero if there are no available tokens.
+It does not block.
+
+#### func (*Bucket) TakeMaxDuration
+
+```go
+func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
+```
+TakeMaxDuration is like Take, except that it will only take tokens from the
+bucket if the wait time for the tokens is no greater than maxWait.
+
+If it would take longer than maxWait for the tokens to become available, it does
+nothing and reports false, otherwise it returns the time that the caller should
+wait until the tokens are actually available, and reports true.
+
+#### func (*Bucket) Wait
+
+```go
+func (tb *Bucket) Wait(count int64)
+```
+Wait takes count tokens from the bucket, waiting until they are available.
+
+#### func (*Bucket) WaitMaxDuration
+
+```go
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
+```
+WaitMaxDuration is like Wait except that it will only take tokens from the
+bucket if it needs to wait no longer than maxWait. It reports whether any
+tokens have been removed from the bucket. If no tokens have been removed, it
+returns immediately.
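+
+As a quick end-to-end sketch (an editor's illustration; the rate and capacity values are arbitrary), a bucket created with NewBucketWithRate can throttle a loop like this:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/juju/ratelimit"
+)
+
+func main() {
+	// Refill at 100 tokens per second, with a burst capacity of 20 tokens.
+	bucket := ratelimit.NewBucketWithRate(100, 20)
+
+	for i := 0; i < 5; i++ {
+		bucket.Wait(1) // take one token, sleeping if none is available yet
+		fmt.Println("request", i, "at", time.Now().Format(time.StampMilli))
+	}
+}
+```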
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go b/src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go
new file mode 100644
index 0000000..3ef32fb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/ratelimit.go
@@ -0,0 +1,245 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3 with static-linking exception.
+// See LICENCE file for details.
+
+// The ratelimit package provides an efficient token bucket implementation
+// that can be used to limit the rate of arbitrary things.
+// See http://en.wikipedia.org/wiki/Token_bucket.
+package ratelimit
+
+import (
+ "math"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// Bucket represents a token bucket that fills at a predetermined rate.
+// Methods on Bucket may be called concurrently.
+type Bucket struct {
+ startTime time.Time
+ capacity int64
+ quantum int64
+ fillInterval time.Duration
+
+ // The mutex guards the fields following it.
+ mu sync.Mutex
+
+ // avail holds the number of available tokens
+ // in the bucket, as of availTick ticks from startTime.
+ // It will be negative when there are consumers
+ // waiting for tokens.
+ avail int64
+ availTick int64
+}
+
+// NewBucket returns a new token bucket that fills at the
+// rate of one token every fillInterval, up to the given
+// maximum capacity. Both arguments must be
+// positive. The bucket is initially full.
+func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
+ return NewBucketWithQuantum(fillInterval, capacity, 1)
+}
+
+// rateMargin specifies the allowed variance of actual
+// rate from specified rate. 1% seems reasonable.
+const rateMargin = 0.01
+
+// NewBucketWithRate returns a token bucket that fills the bucket
+// at the rate of rate tokens per second up to the given
+// maximum capacity. Because of limited clock resolution,
+// at high rates, the actual rate may be up to 1% different from the
+// specified rate.
+func NewBucketWithRate(rate float64, capacity int64) *Bucket {
+ for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
+ fillInterval := time.Duration(1e9 * float64(quantum) / rate)
+ if fillInterval <= 0 {
+ continue
+ }
+ tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
+ if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
+ return tb
+ }
+ }
+ panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
+}
+
+// nextQuantum returns the next quantum to try after q.
+// We grow the quantum exponentially, but slowly, so we
+// get a good fit in the lower numbers.
+func nextQuantum(q int64) int64 {
+ q1 := q * 11 / 10
+ if q1 == q {
+ q1++
+ }
+ return q1
+}
+
+// NewBucketWithQuantum is similar to NewBucket, but allows
+// the specification of the quantum size - quantum tokens
+// are added every fillInterval.
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
+ if fillInterval <= 0 {
+ panic("token bucket fill interval is not > 0")
+ }
+ if capacity <= 0 {
+ panic("token bucket capacity is not > 0")
+ }
+ if quantum <= 0 {
+ panic("token bucket quantum is not > 0")
+ }
+ return &Bucket{
+ startTime: time.Now(),
+ capacity: capacity,
+ quantum: quantum,
+ avail: capacity,
+ fillInterval: fillInterval,
+ }
+}
+
+// Wait takes count tokens from the bucket, waiting until they are
+// available.
+func (tb *Bucket) Wait(count int64) {
+ if d := tb.Take(count); d > 0 {
+ time.Sleep(d)
+ }
+}
+
+// WaitMaxDuration is like Wait except that it will
+// only take tokens from the bucket if it needs to wait
+// for no greater than maxWait. It reports whether
+// any tokens have been removed from the bucket.
+// If no tokens have been removed, it returns immediately.
+func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
+ d, ok := tb.TakeMaxDuration(count, maxWait)
+ if d > 0 {
+ time.Sleep(d)
+ }
+ return ok
+}
+
+const infinityDuration time.Duration = 0x7fffffffffffffff
+
+// Take takes count tokens from the bucket without blocking. It returns
+// the time that the caller should wait until the tokens are actually
+// available.
+//
+// Note that the request is irrevocable - there is no way to return
+// tokens to the bucket once this method commits us to taking them.
+func (tb *Bucket) Take(count int64) time.Duration {
+ d, _ := tb.take(time.Now(), count, infinityDuration)
+ return d
+}
+
+// TakeMaxDuration is like Take, except that
+// it will only take tokens from the bucket if the wait
+// time for the tokens is no greater than maxWait.
+//
+// If it would take longer than maxWait for the tokens
+// to become available, it does nothing and reports false,
+// otherwise it returns the time that the caller should
+// wait until the tokens are actually available, and reports
+// true.
+func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
+ return tb.take(time.Now(), count, maxWait)
+}
+
+// TakeAvailable takes up to count immediately available tokens from the
+// bucket. It returns the number of tokens removed, or zero if there are
+// no available tokens. It does not block.
+func (tb *Bucket) TakeAvailable(count int64) int64 {
+ return tb.takeAvailable(time.Now(), count)
+}
+
+// takeAvailable is the internal version of TakeAvailable - it takes the
+// current time as an argument to enable easy testing.
+func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
+ if count <= 0 {
+ return 0
+ }
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+
+ tb.adjust(now)
+ if tb.avail <= 0 {
+ return 0
+ }
+ if count > tb.avail {
+ count = tb.avail
+ }
+ tb.avail -= count
+ return count
+}
+
+// Available returns the number of available tokens. It will be negative
+// when there are consumers waiting for tokens. Note that if this
+// returns greater than zero, it does not guarantee that calls that take
+// tokens from the bucket will succeed, as the number of available
+// tokens could have changed in the meantime. This method is intended
+// primarily for metrics reporting and debugging.
+func (tb *Bucket) Available() int64 {
+ return tb.available(time.Now())
+}
+
+// available is the internal version of available - it takes the current time as
+// an argument to enable easy testing.
+func (tb *Bucket) available(now time.Time) int64 {
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+ tb.adjust(now)
+ return tb.avail
+}
+
+// Capacity returns the capacity that the bucket was created with.
+func (tb *Bucket) Capacity() int64 {
+ return tb.capacity
+}
+
+// Rate returns the fill rate of the bucket, in tokens per second.
+func (tb *Bucket) Rate() float64 {
+ return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
+}
+
+// take is the internal version of Take - it takes the current time as
+// an argument to enable easy testing.
+func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
+ if count <= 0 {
+ return 0, true
+ }
+ tb.mu.Lock()
+ defer tb.mu.Unlock()
+
+ currentTick := tb.adjust(now)
+ avail := tb.avail - count
+ if avail >= 0 {
+ tb.avail = avail
+ return 0, true
+ }
+ // Round up the missing tokens to the nearest multiple
+ // of quantum - the tokens won't be available until
+ // that tick.
+ endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
+ endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
+ waitTime := endTime.Sub(now)
+ if waitTime > maxWait {
+ return 0, false
+ }
+ tb.avail = avail
+ return waitTime, true
+}
+
+// adjust adjusts the current bucket capacity based on the current time.
+// It returns the current tick.
+func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
+ currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
+
+ if tb.avail >= tb.capacity {
+ return
+ }
+ tb.avail += (currentTick - tb.availTick) * tb.quantum
+ if tb.avail > tb.capacity {
+ tb.avail = tb.capacity
+ }
+ tb.availTick = currentTick
+ return
+}
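
To make the tick arithmetic in take and adjust concrete, here is a small sketch using only the exported API; the 100ms fill interval and capacity of 10 are illustrative assumptions, and the printed wait is approximate because part of the current tick may already have elapsed:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// One token every 100ms, capacity 10, quantum 1; the bucket starts full.
	tb := ratelimit.NewBucketWithQuantum(100*time.Millisecond, 10, 1)

	// Drain the bucket completely.
	tb.TakeAvailable(10)

	// Taking 3 more tokens drives avail to -3; the shortfall is rounded up to
	// the next quantum boundary, so the suggested wait is roughly
	// 3 * fillInterval = 300ms.
	wait := tb.Take(3)
	fmt.Println("suggested wait:", wait)
}
```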
diff --git a/src/kube2msb/vendor/github.com/juju/ratelimit/reader.go b/src/kube2msb/vendor/github.com/juju/ratelimit/reader.go
new file mode 100644
index 0000000..6403bf7
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/juju/ratelimit/reader.go
@@ -0,0 +1,51 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3 with static-linking exception.
+// See LICENCE file for details.
+
+package ratelimit
+
+import "io"
+
+type reader struct {
+ r io.Reader
+ bucket *Bucket
+}
+
+// Reader returns a reader that is rate limited by
+// the given token bucket. Each token in the bucket
+// represents one byte.
+func Reader(r io.Reader, bucket *Bucket) io.Reader {
+ return &reader{
+ r: r,
+ bucket: bucket,
+ }
+}
+
+func (r *reader) Read(buf []byte) (int, error) {
+ n, err := r.r.Read(buf)
+ if n <= 0 {
+ return n, err
+ }
+ r.bucket.Wait(int64(n))
+ return n, err
+}
+
+type writer struct {
+ w io.Writer
+ bucket *Bucket
+}
+
+// Writer returns a writer that is rate limited by
+// the given token bucket. Each token in the bucket
+// represents one byte.
+func Writer(w io.Writer, bucket *Bucket) io.Writer {
+ return &writer{
+ w: w,
+ bucket: bucket,
+ }
+}
+
+func (w *writer) Write(buf []byte) (int, error) {
+ w.bucket.Wait(int64(len(buf)))
+ return w.w.Write(buf)
+}
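
A common way to use these wrappers is to throttle an io.Copy; the sketch below is a hedged example in which the 64KiB/s rate, the burst size, and the in-memory source are arbitrary choices:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/juju/ratelimit"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1024))
	var dst bytes.Buffer

	// One token per byte: cap the transfer at 64KiB/s with a 64KiB burst.
	bucket := ratelimit.NewBucketWithRate(64*1024, 64*1024)

	n, err := io.Copy(&dst, ratelimit.Reader(src, bucket))
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Printf("copied %d bytes under the rate limit\n", n)
}
```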
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 0000000..13f15df
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013 Matt T. Proud
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000..66d9b54
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as a 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as a no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000..c318385
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000..4b76ea9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
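
WriteDelimited and ReadDelimited frame each message as a uvarint length followed by the encoded bytes. Since no generated proto.Message type is part of this change, the sketch below illustrates the same record layout on opaque payloads using encoding/binary; the frame helpers are hypothetical stand-ins for illustration, not part of pbutil:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame writes len(payload) as a uvarint followed by the payload itself,
// mirroring the record layout produced by pbutil.WriteDelimited.
func writeFrame(w io.Writer, payload []byte) error {
	var lenBuf [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(payload)))
	if _, err := w.Write(lenBuf[:n]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame reads one uvarint-prefixed record, mirroring pbutil.ReadDelimited.
func readFrame(r *bufio.Reader) ([]byte, error) {
	length, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	payload := make([]byte, length)
	_, err = io.ReadFull(r, payload)
	return payload, err
}

func main() {
	var buf bytes.Buffer
	_ = writeFrame(&buf, []byte("first record"))
	_ = writeFrame(&buf, []byte("second record"))

	r := bufio.NewReader(&buf)
	for {
		rec, err := readFrame(r)
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("record: %q\n", rec)
	}
}
```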
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE b/src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 0000000..2744858
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE b/src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE
new file mode 100644
index 0000000..5c97abc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/NOTICE
@@ -0,0 +1,17 @@
+runc
+
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (http://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
new file mode 100644
index 0000000..274ab47
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package cgroups
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager interface {
+ // Applies cgroup configuration to the process with the specified pid
+ Apply(pid int) error
+
+ // Returns the PIDs inside the cgroup set
+ GetPids() ([]int, error)
+
+ // Returns the PIDs inside the cgroup set & all sub-cgroups
+ GetAllPids() ([]int, error)
+
+ // Returns statistics for the cgroup set
+ GetStats() (*Stats, error)
+
+ // Toggles the freezer cgroup according to the specified state
+ Freeze(state configs.FreezerState) error
+
+ // Destroys the cgroup set
+ Destroy() error
+
+ // NewCgroupManager() and LoadCgroupManager() require the following attributes:
+ // Paths map[string]string
+ // Cgroups *cgroups.Cgroup
+ // Paths maps each cgroup subsystem to the path at which it is mounted.
+ // Cgroups specifies the cgroup settings for the various subsystems.
+
+ // Returns cgroup paths to save in a state file and to be able to
+ // restore the object later.
+ GetPaths() map[string]string
+
+ // Set the cgroup as configured.
+ Set(container *configs.Config) error
+}
+
+type NotFoundError struct {
+ Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+ return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+ return &NotFoundError{
+ Subsystem: sub,
+ }
+}
+
+func IsNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*NotFoundError)
+ return ok
+}
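
A hedged sketch of how callers typically consume NotFoundError through IsNotFound; the "blkio" subsystem name is just an example, and the cgroups package only builds on Linux because of its build tag:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// Simulate the error a lookup would return when a subsystem such as
	// "blkio" has no mountpoint on this host.
	err := cgroups.NewNotFoundError("blkio")

	if cgroups.IsNotFound(err) {
		// Callers usually skip the subsystem rather than aborting.
		fmt.Println("skipping unmounted subsystem:", err)
		return
	}
	fmt.Println("unexpected error:", err)
}
```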
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
new file mode 100644
index 0000000..278d507
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package cgroups
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
new file mode 100644
index 0000000..633ab04
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
@@ -0,0 +1,402 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+var (
+ subsystems = subsystemSet{
+ &CpusetGroup{},
+ &DevicesGroup{},
+ &MemoryGroup{},
+ &CpuGroup{},
+ &CpuacctGroup{},
+ &PidsGroup{},
+ &BlkioGroup{},
+ &HugetlbGroup{},
+ &NetClsGroup{},
+ &NetPrioGroup{},
+ &PerfEventGroup{},
+ &FreezerGroup{},
+ &NameGroup{GroupName: "name=systemd", Join: true},
+ }
+ CgroupProcesses = "cgroup.procs"
+ HugePageSizes, _ = cgroups.GetHugePageSize()
+)
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+type subsystemSet []subsystem
+
+func (s subsystemSet) Get(name string) (subsystem, error) {
+ for _, ss := range s {
+ if ss.Name() == name {
+ return ss, nil
+ }
+ }
+ return nil, errSubsystemDoesNotExist
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Removes the cgroup represented by 'cgroupData'.
+ Remove(*cgroupData) error
+ // Creates and joins the cgroup represented by 'cgroupData'.
+ Apply(*cgroupData) error
+ // Set the cgroup represented by cgroup.
+ Set(path string, cgroup *configs.Cgroup) error
+}
+
+type Manager struct {
+ mu sync.Mutex
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+// The absolute path to the root of the cgroup hierarchies.
+var cgroupRootLock sync.Mutex
+var cgroupRoot string
+
+// Gets the cgroupRoot.
+func getCgroupRoot() (string, error) {
+ cgroupRootLock.Lock()
+ defer cgroupRootLock.Unlock()
+
+ if cgroupRoot != "" {
+ return cgroupRoot, nil
+ }
+
+ root, err := cgroups.FindCgroupMountpointDir()
+ if err != nil {
+ return "", err
+ }
+
+ if _, err := os.Stat(root); err != nil {
+ return "", err
+ }
+
+ cgroupRoot = root
+ return cgroupRoot, nil
+}
+
+type cgroupData struct {
+ root string
+ innerPath string
+ config *configs.Cgroup
+ pid int
+}
+
+func (m *Manager) Apply(pid int) (err error) {
+ if m.Cgroups == nil {
+ return nil
+ }
+
+ var c = m.Cgroups
+
+ d, err := getCgroupData(m.Cgroups, pid)
+ if err != nil {
+ return err
+ }
+
+ if c.Paths != nil {
+ paths := make(map[string]string)
+ for name, path := range c.Paths {
+ _, err := d.path(name)
+ if err != nil {
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[name] = path
+ }
+ m.Paths = paths
+ return cgroups.EnterPid(m.Paths, pid)
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ paths := make(map[string]string)
+ for _, sys := range subsystems {
+ if err := sys.Apply(d); err != nil {
+ return err
+ }
+ // TODO: Apply should, ideally, be reentrant or be broken up into separate
+ // create and join phases, so that the cgroup hierarchy for a container can be
+ // created first and the join then consists of writing the process pids to
+ // cgroup.procs.
+ p, err := d.path(sys.Name())
+ if err != nil {
+ // The non-presence of the devices subsystem is
+ // considered fatal for security reasons.
+ if cgroups.IsNotFound(err) && sys.Name() != "devices" {
+ continue
+ }
+ return err
+ }
+ paths[sys.Name()] = p
+ }
+ m.Paths = paths
+ return nil
+}
+
+func (m *Manager) Destroy() error {
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err := cgroups.RemovePaths(m.Paths); err != nil {
+ return err
+ }
+ m.Paths = make(map[string]string)
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ paths := m.Paths
+ m.mu.Unlock()
+ return paths
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for name, path := range m.Paths {
+ sys, err := subsystems.Get(name)
+ if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+ return stats, nil
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ for _, sys := range subsystems {
+ // Generate fake cgroup data.
+ d, err := getCgroupData(container.Cgroups, -1)
+ if err != nil {
+ return err
+ }
+ // Get the path, but don't error out if the cgroup wasn't found.
+ path, err := d.path(sys.Name())
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ if m.Paths["cpu"] != "" {
+ if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Freeze toggles the container's freezer cgroup depending on the state
+// provided
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ d, err := getCgroupData(m.Cgroups, 0)
+ if err != nil {
+ return err
+ }
+ dir, err := d.path("freezer")
+ if err != nil {
+ return err
+ }
+ prevState := m.Cgroups.Resources.Freezer
+ m.Cgroups.Resources.Freezer = state
+ freezer, err := subsystems.Get("freezer")
+ if err != nil {
+ return err
+ }
+ err = freezer.Set(dir, m.Cgroups)
+ if err != nil {
+ m.Cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ dir, err := getCgroupPath(m.Cgroups)
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetPids(dir)
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ dir, err := getCgroupPath(m.Cgroups)
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetAllPids(dir)
+}
+
+func getCgroupPath(c *configs.Cgroup) (string, error) {
+ d, err := getCgroupData(c, 0)
+ if err != nil {
+ return "", err
+ }
+
+ return d.path("devices")
+}
+
+func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
+ root, err := getCgroupRoot()
+ if err != nil {
+ return nil, err
+ }
+
+ if (c.Name != "" || c.Parent != "") && c.Path != "" {
+ return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
+ }
+
+ // XXX: Do not remove this code. Path safety is important! -- cyphar
+ cgPath := libcontainerUtils.CleanPath(c.Path)
+ cgParent := libcontainerUtils.CleanPath(c.Parent)
+ cgName := libcontainerUtils.CleanPath(c.Name)
+
+ innerPath := cgPath
+ if innerPath == "" {
+ innerPath = filepath.Join(cgParent, cgName)
+ }
+
+ return &cgroupData{
+ root: root,
+ innerPath: innerPath,
+ config: c,
+ pid: pid,
+ }, nil
+}
+
+func (raw *cgroupData) parentPath(subsystem, mountpoint, root string) (string, error) {
+ // Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating
+ // process could be running in a container and sharing a pid namespace with
+ // the host, in which case /proc/1/cgroup could point to a whole other world
+ // of cgroups.
+ initPath, err := cgroups.GetThisCgroupDir(subsystem)
+ if err != nil {
+ return "", err
+ }
+ // This is needed for nested containers, because in /proc/self/cgroup we
+ // see paths from the host, which don't exist inside the container.
+ relDir, err := filepath.Rel(root, initPath)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(mountpoint, relDir), nil
+}
+
+func (raw *cgroupData) path(subsystem string) (string, error) {
+ mnt, root, err := cgroups.FindCgroupMountpointAndRoot(subsystem)
+ // If the subsystem is not mounted, there is no point in building the path.
+ if err != nil {
+ return "", err
+ }
+
+ // If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
+ if filepath.IsAbs(raw.innerPath) {
+ // Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
+ return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
+ }
+
+ parentPath, err := raw.parentPath(subsystem, mnt, root)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(parentPath, raw.innerPath), nil
+}
+
+func (raw *cgroupData) join(subsystem string) (string, error) {
+ path, err := raw.path(subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return "", err
+ }
+ if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+
+func writeFile(dir, file, data string) error {
+ // Normally dir should not be empty; one case where it can be is when the
+ // cgroup subsystem is not mounted, in which case we want to fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s", file)
+ }
+ if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700); err != nil {
+ return fmt.Errorf("failed to write %v to %v: %v", data, file, err)
+ }
+ return nil
+}
+
+func readFile(dir, file string) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join(dir, file))
+ return string(data), err
+}
+
+func removePath(p string, err error) error {
+ if err != nil {
+ return err
+ }
+ if p != "" {
+ return os.RemoveAll(p)
+ }
+ return nil
+}
+
+func CheckCpushares(path string, c int64) error {
+ var cpuShares int64
+
+ if c == 0 {
+ return nil
+ }
+
+ fd, err := os.Open(filepath.Join(path, "cpu.shares"))
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+
+ _, err = fmt.Fscanf(fd, "%d", &cpuShares)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ if c > cpuShares {
+ return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
+ } else if c < cpuShares {
+ return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
+ }
+
+ return nil
+}
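
The join/writeFile pair above amounts to creating the per-subsystem directory and appending the pid to cgroup.procs. The standalone sketch below mirrors that mechanism under stated assumptions: a cgroup v1 hierarchy mounted at /sys/fs/cgroup, an illustrative "demo" group name, and root privileges; joinCgroup is a hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
)

// joinCgroup mirrors what cgroupData.join and writeFile do for one subsystem:
// create the cgroup directory and append the pid to its cgroup.procs file.
func joinCgroup(mountpoint, subsystem, name string, pid int) (string, error) {
	path := filepath.Join(mountpoint, subsystem, name)
	if err := os.MkdirAll(path, 0755); err != nil {
		return "", err
	}
	procs := filepath.Join(path, "cgroup.procs")
	if err := ioutil.WriteFile(procs, []byte(strconv.Itoa(pid)), 0700); err != nil {
		return "", err
	}
	return path, nil
}

func main() {
	// Assumed cgroup v1 layout; needs root and an existing cpu hierarchy.
	path, err := joinCgroup("/sys/fs/cgroup", "cpu", "demo", os.Getpid())
	if err != nil {
		fmt.Println("join failed (requires root and a cgroup v1 mount):", err)
		return
	}
	fmt.Println("joined cgroup at", path)
}
```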
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
new file mode 100644
index 0000000..a142cb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
@@ -0,0 +1,237 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type BlkioGroup struct {
+}
+
+func (s *BlkioGroup) Name() string {
+ return "blkio"
+}
+
+func (s *BlkioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("blkio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.BlkioWeight != 0 {
+ if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.BlkioLeafWeight != 0 {
+ if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
+ return err
+ }
+ }
+ for _, wd := range cgroup.Resources.BlkioWeightDevice {
+ if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
+ return err
+ }
+ if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
+ if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
+ if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *BlkioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("blkio"))
+}
+
+/*
+examples:
+
+ blkio.sectors
+ 8:0 6792
+
+ blkio.io_service_bytes
+ 8:0 Read 1282048
+ 8:0 Write 2195456
+ 8:0 Sync 2195456
+ 8:0 Async 1282048
+ 8:0 Total 3477504
+ Total 3477504
+
+ blkio.io_serviced
+ 8:0 Read 124
+ 8:0 Write 104
+ 8:0 Sync 104
+ 8:0 Async 124
+ 8:0 Total 228
+ Total 228
+
+ blkio.io_queued
+ 8:0 Read 0
+ 8:0 Write 0
+ 8:0 Sync 0
+ 8:0 Async 0
+ 8:0 Total 0
+ Total 0
+*/
+
+func splitBlkioStatLine(r rune) bool {
+ return r == ' ' || r == ':'
+}
+
+func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
+ var blkioStats []cgroups.BlkioStatEntry
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return blkioStats, nil
+ }
+ return nil, err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ // format: dev type amount
+ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
+ if len(fields) < 3 {
+ if len(fields) == 2 && fields[0] == "Total" {
+ // skip total line
+ continue
+ } else {
+ return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+ }
+ }
+
+ v, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ major := v
+
+ v, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ minor := v
+
+ op := ""
+ valueField := 2
+ if len(fields) == 4 {
+ op = fields[2]
+ valueField = 3
+ }
+ v, err = strconv.ParseUint(fields[valueField], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
+ }
+
+ return blkioStats, nil
+}
+
+func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Try to read CFQ stats available on all CFQ enabled kernels first
+ if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
+ return getCFQStats(path, stats)
+ }
+ return getStats(path, stats) // Use generic stats as fallback
+}
+
+func getCFQStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.SectorsRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoQueuedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoWaitTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoMergedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoTimeRecursive = blkioStats
+
+ return nil
+}
+
+func getStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ return nil
+}
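
The parsing in getBlkioStat is driven entirely by splitBlkioStatLine; the following sketch applies the same tokenization to one of the example lines from the comment block above (error handling is elided for brevity):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One line in the "dev type amount" format shown in the examples above.
	line := "8:0 Write 2195456"

	// Split on spaces and the major:minor colon, as splitBlkioStatLine does.
	fields := strings.FieldsFunc(line, func(r rune) bool {
		return r == ' ' || r == ':'
	})
	// fields is now ["8", "0", "Write", "2195456"].

	major, _ := strconv.ParseUint(fields[0], 10, 64)
	minor, _ := strconv.ParseUint(fields[1], 10, 64)
	op := fields[2]
	value, _ := strconv.ParseUint(fields[3], 10, 64)

	fmt.Printf("major=%d minor=%d op=%s value=%d\n", major, minor, op, value)
}
```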
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
new file mode 100644
index 0000000..a4ef28a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
@@ -0,0 +1,94 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type CpuGroup struct {
+}
+
+func (s *CpuGroup) Name() string {
+ return "cpu"
+}
+
+func (s *CpuGroup) Apply(d *cgroupData) error {
+ // We always want to join the cpu group, to allow fair cpu scheduling
+ // on a container basis
+ _, err := d.join("cpu")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpuShares != 0 {
+ if err := writeFile(path, "cpu.shares", strconv.FormatInt(cgroup.Resources.CpuShares, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuPeriod != 0 {
+ if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(cgroup.Resources.CpuPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuQuota != 0 {
+ if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtPeriod != 0 {
+ if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.Resources.CpuRtPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtRuntime != 0 {
+ if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *CpuGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpu"))
+}
+
+func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
+ f, err := os.Open(filepath.Join(path, "cpu.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return err
+ }
+ switch t {
+ case "nr_periods":
+ stats.CpuStats.ThrottlingData.Periods = v
+
+ case "nr_throttled":
+ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+ case "throttled_time":
+ stats.CpuStats.ThrottlingData.ThrottledTime = v
+ }
+ }
+ return nil
+}
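
Set writes CpuQuota and CpuPeriod verbatim; a common way such values are derived is quota = cores * period. The sketch below illustrates that arithmetic; the 100ms period, the 1.5-core target, and the cfsValues helper are assumptions for illustration, not values or code from this change:

```go
package main

import "fmt"

// cfsValues returns a cpu.cfs_period_us / cpu.cfs_quota_us pair that limits
// a cgroup to roughly the given number of CPU cores.
func cfsValues(cores float64) (periodUs, quotaUs int64) {
	periodUs = 100000 // 100ms, a conventional default period
	quotaUs = int64(cores * float64(periodUs))
	return periodUs, quotaUs
}

func main() {
	period, quota := cfsValues(1.5)
	// These would be written to cpu.cfs_period_us and cpu.cfs_quota_us,
	// as CpuGroup.Set does with Resources.CpuPeriod and Resources.CpuQuota.
	fmt.Printf("cpu.cfs_period_us=%d cpu.cfs_quota_us=%d\n", period, quota)
}
```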
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
new file mode 100644
index 0000000..53afbad
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
@@ -0,0 +1,121 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+const (
+ cgroupCpuacctStat = "cpuacct.stat"
+ nanosecondsInSecond = 1000000000
+)
+
+var clockTicks = uint64(system.GetClockTicks())
+
+type CpuacctGroup struct {
+}
+
+func (s *CpuacctGroup) Name() string {
+ return "cpuacct"
+}
+
+func (s *CpuacctGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *CpuacctGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuacct"))
+}
+
+func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
+ userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
+ if err != nil {
+ return err
+ }
+
+ totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
+ if err != nil {
+ return err
+ }
+
+ percpuUsage, err := getPercpuUsage(path)
+ if err != nil {
+ return err
+ }
+
+ stats.CpuStats.CpuUsage.TotalUsage = totalUsage
+ stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
+ stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
+ stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
+ return nil
+}
+
+// Returns user and kernel usage breakdown in nanoseconds.
+func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
+ userModeUsage := uint64(0)
+ kernelModeUsage := uint64(0)
+ const (
+ userField = "user"
+ systemField = "system"
+ )
+
+ // Expected format:
+ // user <usage in ticks>
+ // system <usage in ticks>
+ data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
+ if err != nil {
+ return 0, 0, err
+ }
+ fields := strings.Fields(string(data))
+ if len(fields) != 4 {
+ return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
+ }
+ if fields[0] != userField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
+ }
+ if fields[2] != systemField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
+ }
+ if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
+ return 0, 0, err
+ }
+ if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
+ return 0, 0, err
+ }
+
+ return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
+}
+
+func getPercpuUsage(path string) ([]uint64, error) {
+ percpuUsage := []uint64{}
+ data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
+ if err != nil {
+ return percpuUsage, err
+ }
+ for _, value := range strings.Fields(string(data)) {
+ value, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
+ }
+ percpuUsage = append(percpuUsage, value)
+ }
+ return percpuUsage, nil
+}
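getCpuUsageBreakdown converts the user and system tick counts read from cpuacct.stat into nanoseconds using the system clock-tick rate. A standalone sketch of that arithmetic; the 100 ticks per second figure is only an assumption for the example, while the real code queries system.GetClockTicks():

package main

import "fmt"

// ticksToNanoseconds mirrors the conversion in getCpuUsageBreakdown.
func ticksToNanoseconds(ticks, clockTicks uint64) uint64 {
	const nanosecondsInSecond = 1000000000
	return (ticks * nanosecondsInSecond) / clockTicks
}

func main() {
	// 250 ticks at 100 Hz is 2.5 seconds, i.e. 2.5e9 nanoseconds.
	fmt.Println(ticksToNanoseconds(250, 100)) // 2500000000
}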
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
new file mode 100644
index 0000000..cbe62bd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
@@ -0,0 +1,139 @@
+// +build linux
+
+package fs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+type CpusetGroup struct {
+}
+
+func (s *CpusetGroup) Name() string {
+ return "cpuset"
+}
+
+func (s *CpusetGroup) Apply(d *cgroupData) error {
+ dir, err := d.path("cpuset")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return s.ApplyDir(dir, d.config, d.pid)
+}
+
+func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpusetCpus != "" {
+ if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpusetMems != "" {
+ if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuset"))
+}
+
+func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
+
+func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+ // This might happen if we have no cpuset cgroup mounted.
+ // Just do nothing and don't fail.
+ if dir == "" {
+ return nil
+ }
+ root, err := getCgroupRoot()
+ if err != nil {
+ return err
+ }
+ if err := s.ensureParent(dir, root); err != nil {
+ return err
+ }
+ // Because we are not using d.join, we need to place the pid into the
+ // procs file ourselves, unlike with the other subsystems.
+ if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
+ if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
+ return
+ }
+ if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
+ return
+ }
+ return cpus, mems, nil
+}
+
+// ensureParent makes sure that the parent directory of current is created
+// and populated with the proper cpus and mems files copied from
+// its parent.
+func (s *CpusetGroup) ensureParent(current, root string) error {
+ parent := filepath.Dir(current)
+ if libcontainerUtils.CleanPath(parent) == root {
+ return nil
+ }
+ // Avoid infinite recursion.
+ if parent == current {
+ return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
+ }
+ if err := s.ensureParent(parent, root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(current, 0755); err != nil {
+ return err
+ }
+ return s.copyIfNeeded(current, parent)
+}
+
+// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the current files are empty.
+func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
+ var (
+ err error
+ currentCpus, currentMems []byte
+ parentCpus, parentMems []byte
+ )
+
+ if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
+ return err
+ }
+ if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
+ return err
+ }
+
+ if s.isEmpty(currentCpus) {
+ if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
+ return err
+ }
+ }
+ if s.isEmpty(currentMems) {
+ if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) isEmpty(b []byte) bool {
+ return len(bytes.Trim(b, "\n")) == 0
+}
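copyIfNeeded only inherits cpuset.cpus and cpuset.mems from the parent directory when the current files are empty, and isEmpty treats a file holding nothing but a newline as unset. A small standalone sketch of that check:

package main

import (
	"bytes"
	"fmt"
)

// isEmpty mirrors CpusetGroup.isEmpty: a cpuset file containing only a
// newline is considered unset and eligible to inherit the parent's value.
func isEmpty(b []byte) bool {
	return len(bytes.Trim(b, "\n")) == 0
}

func main() {
	fmt.Println(isEmpty([]byte("\n")))    // true  -> copy the parent's value
	fmt.Println(isEmpty([]byte("0-3\n"))) // false -> keep the existing value
}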
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
new file mode 100644
index 0000000..5f78331
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
@@ -0,0 +1,78 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type DevicesGroup struct {
+}
+
+func (s *DevicesGroup) Name() string {
+ return "devices"
+}
+
+func (s *DevicesGroup) Apply(d *cgroupData) error {
+ _, err := d.join("devices")
+ if err != nil {
+ // We return the error even if it is a `not found` error, because the
+ // devices cgroup is a hard requirement for the container's security.
+ return err
+ }
+ return nil
+}
+
+func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if system.RunningInUserNS() {
+ return nil
+ }
+
+ devices := cgroup.Resources.Devices
+ if len(devices) > 0 {
+ for _, dev := range devices {
+ file := "devices.deny"
+ if dev.Allow {
+ file = "devices.allow"
+ }
+ if err := writeFile(path, file, dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if !cgroup.Resources.AllowAllDevices {
+ if err := writeFile(path, "devices.deny", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.AllowedDevices {
+ if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if err := writeFile(path, "devices.allow", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.DeniedDevices {
+ if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *DevicesGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("devices"))
+}
+
+func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
new file mode 100644
index 0000000..e70dfe3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
@@ -0,0 +1,61 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type FreezerGroup struct {
+}
+
+func (s *FreezerGroup) Name() string {
+ return "freezer"
+}
+
+func (s *FreezerGroup) Apply(d *cgroupData) error {
+ _, err := d.join("freezer")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
+ switch cgroup.Resources.Freezer {
+ case configs.Frozen, configs.Thawed:
+ if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil {
+ return err
+ }
+
+ for {
+ state, err := readFile(path, "freezer.state")
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) {
+ break
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+ case configs.Undefined:
+ return nil
+ default:
+ return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
+ }
+
+ return nil
+}
+
+func (s *FreezerGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("freezer"))
+}
+
+func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
new file mode 100644
index 0000000..3ef9e03
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package fs
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 0000000..2f97277
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,71 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type HugetlbGroup struct {
+}
+
+func (s *HugetlbGroup) Name() string {
+ return "hugetlb"
+}
+
+func (s *HugetlbGroup) Apply(d *cgroupData) error {
+ _, err := d.join("hugetlb")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, hugetlb := range cgroup.Resources.HugetlbLimit {
+ if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *HugetlbGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("hugetlb"))
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+ hugetlbStats := cgroups.HugetlbStats{}
+ for _, pageSize := range HugePageSizes {
+ usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ hugetlbStats.Usage = value
+
+ maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".")
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ hugetlbStats.MaxUsage = value
+
+ failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".")
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ hugetlbStats.Failcnt = value
+
+ stats.HugetlbStats[pageSize] = hugetlbStats
+ }
+
+ return nil
+}
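HugetlbGroup builds the per-page-size file names by joining the subsystem name, the page size and the parameter with dots. A short sketch of the resulting names; the "2MB" page size is an example of the strings GetHugePageSize produces:

package main

import (
	"fmt"
	"strings"
)

func main() {
	pageSize := "2MB" // assumed example page size
	for _, suffix := range []string{"limit_in_bytes", "usage_in_bytes", "max_usage_in_bytes", "failcnt"} {
		// e.g. hugetlb.2MB.limit_in_bytes
		fmt.Println(strings.Join([]string{"hugetlb", pageSize, suffix}, "."))
	}
}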
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
new file mode 100644
index 0000000..b837128
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
@@ -0,0 +1,291 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type MemoryGroup struct {
+}
+
+func (s *MemoryGroup) Name() string {
+ return "memory"
+}
+
+func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
+ path, err := d.path("memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ // reset error.
+ err = nil
+ if path == "" {
+ // Invalid input.
+ return fmt.Errorf("invalid path for memory cgroups: %+v", d)
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(path)
+ }
+ }()
+ if !cgroups.PathExists(path) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ if memoryAssigned(d.config) {
+ // We have to set kernel memory here, as we can't change it once
+ // processes have been attached to the cgroup.
+ if err = s.SetKernelMemory(path, d.config); err != nil {
+ return err
+ }
+ }
+ // We need to join the memory cgroup after setting the memory limits,
+ // because kmem.limit_in_bytes can only be set when the cgroup is empty.
+ if _, jerr := d.join("memory"); jerr != nil && !cgroups.IsNotFound(jerr) {
+ err = jerr
+ return err
+ }
+ return nil
+}
+
+func getModifyTime(path string) (time.Time, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return time.Time{}, fmt.Errorf("failed to get memory cgroups creation time: %v", err)
+ }
+ return stat.ModTime(), nil
+}
+
+func (s *MemoryGroup) SetKernelMemory(path string, cgroup *configs.Cgroup) error {
+ // This has to be done separately because it has special
+ // constraints (it can only be initialized before setting up a
+ // hierarchy or adding a task to the cgroup. However, if
+ // successfully initialized, it can be updated anytime afterwards).
+ if cgroup.Resources.KernelMemory != 0 {
+ // Is kmem.limit_in_bytes already set?
+ // memory.kmem.max_usage_in_bytes is a read-only file. Use it to get cgroups creation time.
+ kmemCreationTime, err := getModifyTime(filepath.Join(path, "memory.kmem.max_usage_in_bytes"))
+ if err != nil {
+ return err
+ }
+ kmemLimitsUpdateTime, err := getModifyTime(filepath.Join(path, "memory.kmem.limit_in_bytes"))
+ if err != nil {
+ return err
+ }
+ // kmem.limit_in_bytes has already been set if its update time is after the creation time.
+ // We use `!=` instead of `>` because the update timestamp can lose precision compared to the creation timestamp.
+ kmemInitialized := !kmemLimitsUpdateTime.Equal(kmemCreationTime)
+ if !kmemInitialized {
+ // If there are already tasks in the cgroup, we can't change the limit either.
+ tasks, err := getCgroupParamString(path, "tasks")
+ if err != nil {
+ return err
+ }
+ if tasks != "" {
+ return fmt.Errorf("cannot set kmem.limit_in_bytes after task have joined this cgroup")
+ }
+ }
+ if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
+ // When memory and swap memory are both set, we need to handle the
+ // update cases for a running container carefully.
+ if cgroup.Resources.Memory != 0 && cgroup.Resources.MemorySwap > 0 {
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+
+ // When updating the memory limit, we have to order the writes to
+ // memory and swap memory so that the intermediate state still
+ // passes the kernel's validation of the new value against the old one.
+ if memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) {
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ } else {
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ } else {
+ if cgroup.Resources.Memory != 0 {
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwap > 0 {
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if err := setMemoryAndSwap(path, cgroup); err != nil {
+ return err
+ }
+
+ if err := s.SetKernelMemory(path, cgroup); err != nil {
+ return err
+ }
+
+ if cgroup.Resources.MemoryReservation != 0 {
+ if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.KernelMemoryTCP != 0 {
+ if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.OomKillDisable {
+ if err := writeFile(path, "memory.oom_control", "1"); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
+ return nil
+ } else if int64(*cgroup.Resources.MemorySwappiness) >= 0 && int64(*cgroup.Resources.MemorySwappiness) <= 100 {
+ if err := writeFile(path, "memory.swappiness", strconv.FormatInt(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", int64(*cgroup.Resources.MemorySwappiness))
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("memory"))
+}
+
+func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Set stats from memory.stat.
+ statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer statsFile.Close()
+
+ sc := bufio.NewScanner(statsFile)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
+ }
+ stats.MemoryStats.Stats[t] = v
+ }
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
+
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.Usage = memoryUsage
+ swapUsage, err := getMemoryData(path, "memsw")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.SwapUsage = swapUsage
+ kernelUsage, err := getMemoryData(path, "kmem")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelUsage = kernelUsage
+ kernelTCPUsage, err := getMemoryData(path, "kmem.tcp")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelTCPUsage = kernelTCPUsage
+
+ return nil
+}
+
+func memoryAssigned(cgroup *configs.Cgroup) bool {
+ return cgroup.Resources.Memory != 0 ||
+ cgroup.Resources.MemoryReservation != 0 ||
+ cgroup.Resources.MemorySwap > 0 ||
+ cgroup.Resources.KernelMemory > 0 ||
+ cgroup.Resources.KernelMemoryTCP > 0 ||
+ cgroup.Resources.OomKillDisable ||
+ (cgroup.Resources.MemorySwappiness != nil && *cgroup.Resources.MemorySwappiness != -1)
+}
+
+func getMemoryData(path, name string) (cgroups.MemoryData, error) {
+ memoryData := cgroups.MemoryData{}
+
+ moduleName := "memory"
+ if name != "" {
+ moduleName = strings.Join([]string{"memory", name}, ".")
+ }
+ usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
+ maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
+ failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
+ limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
+
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ memoryData.Usage = value
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ memoryData.MaxUsage = value
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ memoryData.Failcnt = value
+ value, err = getCgroupParamUint(path, limit)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
+ }
+ memoryData.Limit = value
+
+ return memoryData, nil
+}
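setMemoryAndSwap chooses the order of its two writes by comparing the currently applied memory limit with the requested memory+swap limit, so that each intermediate state remains acceptable to the kernel's validation. A hypothetical standalone sketch of that branch (writeOrder is not part of the vendored code):

package main

import "fmt"

// writeOrder returns the file write order picked by setMemoryAndSwap:
// raise memory.memsw.limit_in_bytes first when growing past the current
// memory limit, otherwise shrink memory.limit_in_bytes first.
func writeOrder(currentMemLimit uint64, newMemswLimit int64) []string {
	if currentMemLimit < uint64(newMemswLimit) {
		return []string{"memory.memsw.limit_in_bytes", "memory.limit_in_bytes"}
	}
	return []string{"memory.limit_in_bytes", "memory.memsw.limit_in_bytes"}
}

func main() {
	fmt.Println(writeOrder(512<<20, 2<<30)) // growing: memsw first
	fmt.Println(writeOrder(4<<30, 1<<30))   // shrinking: memory first
}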
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
new file mode 100644
index 0000000..d8cf1d8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
@@ -0,0 +1,40 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NameGroup struct {
+ GroupName string
+ Join bool
+}
+
+func (s *NameGroup) Name() string {
+ return s.GroupName
+}
+
+func (s *NameGroup) Apply(d *cgroupData) error {
+ if s.Join {
+ // ignore errors if the named cgroup does not exist
+ d.join(s.GroupName)
+ }
+ return nil
+}
+
+func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *NameGroup) Remove(d *cgroupData) error {
+ if s.Join {
+ removePath(d.path(s.GroupName))
+ }
+ return nil
+}
+
+func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
new file mode 100644
index 0000000..8a4054b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetClsGroup struct {
+}
+
+func (s *NetClsGroup) Name() string {
+ return "net_cls"
+}
+
+func (s *NetClsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_cls")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.NetClsClassid != "" {
+ if err := writeFile(path, "net_cls.classid", cgroup.Resources.NetClsClassid); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetClsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_cls"))
+}
+
+func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
new file mode 100644
index 0000000..d0ab2af
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetPrioGroup struct {
+}
+
+func (s *NetPrioGroup) Name() string {
+ return "net_prio"
+}
+
+func (s *NetPrioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_prio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, prioMap := range cgroup.Resources.NetPrioIfpriomap {
+ if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetPrioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_prio"))
+}
+
+func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
new file mode 100644
index 0000000..5693676
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PerfEventGroup struct {
+}
+
+func (s *PerfEventGroup) Name() string {
+ return "perf_event"
+}
+
+func (s *PerfEventGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *PerfEventGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("perf_event"))
+}
+
+func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
new file mode 100644
index 0000000..f1e3720
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
@@ -0,0 +1,73 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PidsGroup struct {
+}
+
+func (s *PidsGroup) Name() string {
+ return "pids"
+}
+
+func (s *PidsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("pids")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.PidsLimit != 0 {
+ // "max" is the fallback value.
+ limit := "max"
+
+ if cgroup.Resources.PidsLimit > 0 {
+ limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
+ }
+
+ if err := writeFile(path, "pids.max", limit); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *PidsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("pids"))
+}
+
+func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ current, err := getCgroupParamUint(path, "pids.current")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.current - %s", err)
+ }
+
+ maxString, err := getCgroupParamString(path, "pids.max")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.max - %s", err)
+ }
+
+ // Default if pids.max == "max" is 0 -- which represents "no limit".
+ var max uint64
+ if maxString != "max" {
+ max, err = parseUint(maxString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max"))
+ }
+ }
+
+ stats.PidsStats.Current = current
+ stats.PidsStats.Limit = max
+ return nil
+}
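PidsGroup.GetStats reports the literal pids.max value "max" as a limit of 0, meaning unlimited; any other value is parsed as the numeric limit. A minimal sketch of that mapping (parsePidsMax is a hypothetical helper):

package main

import (
	"fmt"
	"strconv"
)

// parsePidsMax mirrors how GetStats interprets pids.max.
func parsePidsMax(s string) (uint64, error) {
	if s == "max" {
		return 0, nil // 0 stands for "no limit"
	}
	return strconv.ParseUint(s, 10, 64)
}

func main() {
	fmt.Println(parsePidsMax("max")) // 0 <nil>
	fmt.Println(parsePidsMax("512")) // 512 <nil>
}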
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
new file mode 100644
index 0000000..5ff0a16
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
@@ -0,0 +1,78 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var (
+ ErrNotValidFormat = errors.New("line is not a valid key value format")
+)
+
+// Saturates negative values at zero and returns a uint64.
+// Due to kernel bugs, some of the memory cgroup stats can be negative.
+func parseUint(s string, base, bitSize int) (uint64, error) {
+ value, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+ // 1. Handle negative values that are greater than or equal to MinInt64, and
+ // 2. Handle negative values that overflow below MinInt64.
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
+ return 0, nil
+ }
+
+ return value, err
+ }
+
+ return value, nil
+}
+
+// Parses a cgroup param and returns its name and value,
+// e.g. "io_service_bytes 1234" returns io_service_bytes, 1234.
+func getCgroupParamKeyValue(t string) (string, uint64, error) {
+ parts := strings.Fields(t)
+ switch len(parts) {
+ case 2:
+ value, err := parseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err)
+ }
+
+ return parts[0], value, nil
+ default:
+ return "", 0, ErrNotValidFormat
+ }
+}
+
+// Gets a single uint64 value from the specified cgroup file.
+func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
+ fileName := filepath.Join(cgroupPath, cgroupFile)
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return 0, err
+ }
+
+ res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName)
+ }
+ return res, nil
+}
+
+// Gets a string value from the specified cgroup file
+func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) {
+ contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(contents)), nil
+}
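parseUint above clamps negative inputs to zero because some kernel memory counters can go negative. A standalone re-implementation of the same saturation logic, shown only for illustration:

package main

import (
	"fmt"
	"strconv"
)

// saturatingParseUint mirrors parseUint: negative values are clamped to zero
// instead of being reported as parse errors.
func saturatingParseUint(s string) (uint64, error) {
	value, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		intValue, intErr := strconv.ParseInt(s, 10, 64)
		// In-range negative values saturate to zero.
		if intErr == nil && intValue < 0 {
			return 0, nil
		}
		// Values overflowing below MinInt64 also saturate to zero.
		if nerr, ok := intErr.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange && intValue < 0 {
			return 0, nil
		}
		return value, err
	}
	return value, nil
}

func main() {
	fmt.Println(saturatingParseUint("1234")) // 1234 <nil>
	fmt.Println(saturatingParseUint("-42"))  // 0 <nil>
}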
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
new file mode 100644
index 0000000..b483f1b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
@@ -0,0 +1,106 @@
+// +build linux
+
+package cgroups
+
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods,omitempty"`
+ // Number of periods when the container hit its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+}
+
+// CpuUsage denotes the usage of a CPU.
+// All CPU stats are aggregate since container inception.
+type CpuUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds.
+ TotalUsage uint64 `json:"total_usage,omitempty"`
+ // Total CPU time consumed per core.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+ // Time spent by tasks of the cgroup in kernel mode.
+ // Units: nanoseconds.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+ // Time spent by tasks of the cgroup in user mode.
+ // Units: nanoseconds.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+ CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryData struct {
+ Usage uint64 `json:"usage,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ Failcnt uint64 `json:"failcnt"`
+ Limit uint64 `json:"limit"`
+}
+
+type MemoryStats struct {
+ // memory used for cache
+ Cache uint64 `json:"cache,omitempty"`
+ // usage of memory
+ Usage MemoryData `json:"usage,omitempty"`
+ // usage of memory + swap
+ SwapUsage MemoryData `json:"swap_usage,omitempty"`
+ // usage of kernel memory
+ KernelUsage MemoryData `json:"kernel_usage,omitempty"`
+ // usage of kernel TCP memory
+ KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
+
+ Stats map[string]uint64 `json:"stats,omitempty"`
+}
+
+type PidsStats struct {
+ // number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // active pids hard limit
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+type BlkioStatEntry struct {
+ Major uint64 `json:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty"`
+ Op string `json:"op,omitempty"`
+ Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type HugetlbStats struct {
+ // current res_counter usage for hugetlb
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // number of times hugetlb usage allocation has failed.
+ Failcnt uint64 `json:"failcnt"`
+}
+
+type Stats struct {
+ CpuStats CpuStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ // the map is in the format "size of hugepage: stats of the hugepage"
+ HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
+}
+
+func NewStats() *Stats {
+ memoryStats := MemoryStats{Stats: make(map[string]uint64)}
+ hugetlbStats := make(map[string]HugetlbStats)
+ return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
new file mode 100644
index 0000000..1a7c4e1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -0,0 +1,413 @@
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/go-units"
+)
+
+const cgroupNamePrefix = "name="
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
+func FindCgroupMountpoint(subsystem string) (string, error) {
+ // We are not using mount.GetMounts() because it is very inefficient;
+ // parsing the file directly is roughly 10x faster because it avoids Sscanf.
+ // It was one of the two major performance drawbacks at container start.
+ if !isSubsystemAvailable(subsystem) {
+ return "", NewNotFoundError(subsystem)
+ }
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) {
+ if !isSubsystemAvailable(subsystem) {
+ return "", "", NewNotFoundError(subsystem)
+ }
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], fields[3], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", "", err
+ }
+
+ return "", "", NewNotFoundError(subsystem)
+}
+
+func isSubsystemAvailable(subsystem string) bool {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return false
+ }
+ _, avail := cgroups[subsystem]
+ return avail
+}
+
+func FindCgroupMountpointDir() (string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ text := scanner.Text()
+ fields := strings.Split(text, " ")
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ numPostFields := len(postSeparatorFields)
+
+ // This is an error as we can't detect if the mount is for "cgroup"
+ if numPostFields == 0 {
+ return "", fmt.Errorf("Found no fields post '-' in %q", text)
+ }
+
+ if postSeparatorFields[0] == "cgroup" {
+ // Check that the mount is properly formatted.
+ if numPostFields < 3 {
+ return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ return filepath.Dir(fields[4]), nil
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError("cgroup")
+}
+
+type Mount struct {
+ Mountpoint string
+ Root string
+ Subsystems []string
+}
+
+func (m Mount) GetThisCgroupDir(cgroups map[string]string) (string, error) {
+ if len(m.Subsystems) == 0 {
+ return "", fmt.Errorf("no subsystem for mount")
+ }
+
+ return getControllerPath(m.Subsystems[0], cgroups)
+}
+
+func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) {
+ res := make([]Mount, 0, len(ss))
+ scanner := bufio.NewScanner(mi)
+ numFound := 0
+ for scanner.Scan() && numFound < len(ss) {
+ txt := scanner.Text()
+ sepIdx := strings.Index(txt, " - ")
+ if sepIdx == -1 {
+ return nil, fmt.Errorf("invalid mountinfo format")
+ }
+ if txt[sepIdx+3:sepIdx+9] != "cgroup" {
+ continue
+ }
+ fields := strings.Split(txt, " ")
+ m := Mount{
+ Mountpoint: fields[4],
+ Root: fields[3],
+ }
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if !ss[opt] {
+ continue
+ }
+ if strings.HasPrefix(opt, cgroupNamePrefix) {
+ m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
+ } else {
+ m.Subsystems = append(m.Subsystems, opt)
+ }
+ numFound++
+ }
+ res = append(res, m)
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+func GetCgroupMounts() ([]Mount, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ all, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return nil, err
+ }
+
+ allMap := make(map[string]bool)
+ for s := range all {
+ allMap[s] = true
+ }
+ return getCgroupMountsHelper(allMap, f)
+}
+
+// GetAllSubsystems returns all the cgroup subsystems supported by the kernel
+func GetAllSubsystems() ([]string, error) {
+ f, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ subsystems := []string{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ text := s.Text()
+ if text[0] != '#' {
+ parts := strings.Fields(text)
+ if len(parts) >= 4 && parts[3] != "0" {
+ subsystems = append(subsystems, parts[0])
+ }
+ }
+ }
+ return subsystems, nil
+}
+
+// GetThisCgroupDir returns the relative path to the cgroup the calling process is running in.
+func GetThisCgroupDir(subsystem string) (string, error) {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetInitCgroupDir(subsystem string) (string, error) {
+ cgroups, err := ParseCgroupFile("/proc/1/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func readProcsFile(dir string) ([]int, error) {
+ f, err := os.Open(filepath.Join(dir, "cgroup.procs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var (
+ s = bufio.NewScanner(f)
+ out = []int{}
+ )
+
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ pid, err := strconv.Atoi(t)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, pid)
+ }
+ }
+ return out, nil
+}
+
+// ParseCgroupFile parses the given cgroup file, typically /proc/<pid>/cgroup,
+// into a map of subsystem names to cgroup paths.
+func ParseCgroupFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseCgroupFromReader(f)
+}
+
+// helper function for ParseCgroupFile to make testing easier
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+ s := bufio.NewScanner(r)
+ cgroups := make(map[string]string)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ // from cgroups(7):
+ // /proc/[pid]/cgroup
+ // ...
+ // For each cgroup hierarchy ... there is one entry
+ // containing three colon-separated fields of the form:
+ // hierarchy-ID:subsystem-list:cgroup-path
+ parts := strings.SplitN(text, ":", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
+ }
+
+ for _, subs := range strings.Split(parts[1], ",") {
+ cgroups[subs] = parts[2]
+ }
+ }
+ return cgroups, nil
+}
+
+func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+ if p, ok := cgroups[subsystem]; ok {
+ return p, nil
+ }
+
+ if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok {
+ return p, nil
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func PathExists(path string) bool {
+ if _, err := os.Stat(path); err != nil {
+ return false
+ }
+ return true
+}
+
+func EnterPid(cgroupPaths map[string]string, pid int) error {
+ for _, path := range cgroupPaths {
+ if PathExists(path) {
+ if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"),
+ []byte(strconv.Itoa(pid)), 0700); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// RemovePaths iterates over the provided paths, removing them.
+// We try to remove each path up to five times, with an increasing delay between attempts.
+// If some cgroups still remain after that, an appropriate error is returned.
+func RemovePaths(paths map[string]string) (err error) {
+ delay := 10 * time.Millisecond
+ for i := 0; i < 5; i++ {
+ if i != 0 {
+ time.Sleep(delay)
+ delay *= 2
+ }
+ for s, p := range paths {
+ os.RemoveAll(p)
+ // TODO: log failures here.
+ _, err := os.Stat(p)
+ // We need this unusual way of checking cgroup existence because
+ // RemoveAll almost always returns an error, even for cgroups that
+ // were already removed.
+ if os.IsNotExist(err) {
+ delete(paths, s)
+ }
+ }
+ if len(paths) == 0 {
+ return nil
+ }
+ }
+ return fmt.Errorf("Failed to remove paths: %v", paths)
+}
+
+func GetHugePageSize() ([]string, error) {
+ var pageSizes []string
+ sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"}
+ files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+ if err != nil {
+ return pageSizes, err
+ }
+ for _, st := range files {
+ nameArray := strings.Split(st.Name(), "-")
+ pageSize, err := units.RAMInBytes(nameArray[1])
+ if err != nil {
+ return []string{}, err
+ }
+ sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)
+ pageSizes = append(pageSizes, sizeString)
+ }
+
+ return pageSizes, nil
+}
+
+// GetPids returns all pids that were added to the cgroup at path.
+func GetPids(path string) ([]int, error) {
+ return readProcsFile(path)
+}
+
+// GetAllPids returns all pids that were added to the cgroup at path and to all
+// of its subcgroups.
+func GetAllPids(path string) ([]int, error) {
+ var pids []int
+ // collect pids from all sub-cgroups
+ err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
+ dir, file := filepath.Split(p)
+ if file != "cgroup.procs" {
+ return nil
+ }
+ if iErr != nil {
+ return iErr
+ }
+ cPids, err := readProcsFile(dir)
+ if err != nil {
+ return err
+ }
+ pids = append(pids, cPids...)
+ return nil
+ })
+ return pids, err
+}
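parseCgroupFromReader splits each hierarchy-ID:subsystem-list:cgroup-path line on the first two colons and maps every subsystem in the comma-separated list to the same path. A short sketch of that parsing with a made-up /proc/self/cgroup line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical example line; real content comes from /proc/self/cgroup.
	line := "4:cpu,cpuacct:/docker/abc123"
	parts := strings.SplitN(line, ":", 3)
	cgroups := make(map[string]string)
	for _, subs := range strings.Split(parts[1], ",") {
		cgroups[subs] = parts[2]
	}
	fmt.Println(cgroups["cpu"], cgroups["cpuacct"]) // /docker/abc123 /docker/abc123
}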
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
new file mode 100644
index 0000000..e0f3ca1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -0,0 +1,61 @@
+package configs
+
+import "fmt"
+
+// blockIODevice holds the major:minor format supported by the blkio cgroup.
+type blockIODevice struct {
+ // Major is the device's major number
+ Major int64 `json:"major"`
+ // Minor is the device's minor number
+ Minor int64 `json:"minor"`
+}
+
+// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
+type WeightDevice struct {
+ blockIODevice
+ // Weight is the bandwidth rate for the device, range is from 10 to 1000
+ Weight uint16 `json:"weight"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ LeafWeight uint16 `json:"leafWeight"`
+}
+
+// NewWeightDevice returns a configured WeightDevice pointer
+func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice {
+ wd := &WeightDevice{}
+ wd.Major = major
+ wd.Minor = minor
+ wd.Weight = weight
+ wd.LeafWeight = leafWeight
+ return wd
+}
+
+// WeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) WeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)
+}
+
+// LeafWeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) LeafWeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)
+}
+
+// ThrottleDevice struct holds a `major:minor rate_per_second` pair
+type ThrottleDevice struct {
+ blockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// NewThrottleDevice returns a configured ThrottleDevice pointer
+func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
+ td := &ThrottleDevice{}
+ td.Major = major
+ td.Minor = minor
+ td.Rate = rate
+ return td
+}
+
+// String formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) String() string {
+ return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
+}
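The formatting helpers above render the "major:minor value" strings written to the blkio cgroup files. A brief usage sketch, assuming the vendored configs package is importable under its upstream path; the device numbers and rates are examples only:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// 8:0 conventionally identifies the first SCSI disk.
	wd := configs.NewWeightDevice(8, 0, 500, 300)
	fmt.Println(wd.WeightString())     // "8:0 500"
	fmt.Println(wd.LeafWeightString()) // "8:0 300"

	td := configs.NewThrottleDevice(8, 0, 1048576)
	fmt.Println(td.String()) // "8:0 1048576"
}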
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
new file mode 100644
index 0000000..f2eff91
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
@@ -0,0 +1,124 @@
+// +build linux freebsd
+
+package configs
+
+type FreezerState string
+
+const (
+ Undefined FreezerState = ""
+ Frozen FreezerState = "FROZEN"
+ Thawed FreezerState = "THAWED"
+)
+
+type Cgroup struct {
+ // Deprecated, use Path instead
+ Name string `json:"name,omitempty"`
+
+ // Name of the parent cgroup or slice.
+ // Deprecated, use Path instead
+ Parent string `json:"parent,omitempty"`
+
+ // Path specifies the path to cgroups that are created and/or joined by the container.
+ // The path is assumed to be relative to the host system cgroup mountpoint.
+ Path string `json:"path"`
+
+ // ScopePrefix describes the prefix for the scope name.
+ ScopePrefix string `json:"scope_prefix"`
+
+ // Paths represent the absolute cgroups paths to join.
+ // This takes precedence over Path.
+ Paths map[string]string
+
+ // Resources contains various cgroups settings to apply
+ *Resources
+}
+
+type Resources struct {
+ // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
+ // Deprecated
+ AllowAllDevices bool `json:"allow_all_devices,omitempty"`
+ // Deprecated
+ AllowedDevices []*Device `json:"allowed_devices,omitempty"`
+ // Deprecated
+ DeniedDevices []*Device `json:"denied_devices,omitempty"`
+
+ Devices []*Device `json:"devices"`
+
+ // Memory limit (in bytes)
+ Memory int64 `json:"memory"`
+
+ // Memory reservation or soft_limit (in bytes)
+ MemoryReservation int64 `json:"memory_reservation"`
+
+ // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwap int64 `json:"memory_swap"`
+
+ // Kernel memory limit (in bytes)
+ KernelMemory int64 `json:"kernel_memory"`
+
+ // Kernel memory limit for TCP use (in bytes)
+ KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
+
+ // CPU shares (relative weight vs. other containers)
+ CpuShares int64 `json:"cpu_shares"`
+
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ CpuQuota int64 `json:"cpu_quota"`
+
+ // CPU period to be used for hardcapping (in usecs). 0 to use system default.
+ CpuPeriod int64 `json:"cpu_period"`
+
+ // How much CPU time can be used in realtime scheduling (in usecs).
+ CpuRtRuntime int64 `json:"cpu_rt_quota"`
+
+ // CPU period to be used for realtime scheduling (in usecs).
+ CpuRtPeriod int64 `json:"cpu_rt_period"`
+
+ // CPUs to use within the cpuset.
+ CpusetCpus string `json:"cpuset_cpus"`
+
+ // Memory nodes to use within the cpuset.
+ CpusetMems string `json:"cpuset_mems"`
+
+ // Process limit; set it to a value <= 0 to disable the limit.
+ PidsLimit int64 `json:"pids_limit"`
+
+ // Specifies per cgroup weight, range is from 10 to 1000.
+ BlkioWeight uint16 `json:"blkio_weight"`
+
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
+
+ // Weight per cgroup per device, can override BlkioWeight.
+ BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
+
+ // IO read rate limit per cgroup per device, bytes per second.
+ BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
+
+ // IO write rate limit per cgroup per device, bytes per second.
+ BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
+
+ // IO read rate limit per cgroup per device, IO per second.
+ BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
+
+ // IO write rate limit per cgroup per device, IO per second.
+ BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
+
+ // set the freeze value for the process
+ Freezer FreezerState `json:"freezer"`
+
+ // Hugetlb limit (in bytes)
+ HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
+
+ // Whether to disable OOM Killer
+ OomKillDisable bool `json:"oom_kill_disable"`
+
+ // Tuning swappiness behaviour per cgroup
+ MemorySwappiness *int64 `json:"memory_swappiness"`
+
+ // Set priority of network traffic for container
+ NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
+
+ // Set class identifier for container's network packets
+ NetClsClassid string `json:"net_cls_classid"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
new file mode 100644
index 0000000..95e2830
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
@@ -0,0 +1,6 @@
+// +build !windows,!linux,!freebsd
+
+package configs
+
+type Cgroup struct {
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
new file mode 100644
index 0000000..d74847b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
@@ -0,0 +1,6 @@
+package configs
+
+// TODO Windows: This can ultimately be entirely factored out on Windows as
+// cgroups are a Unix-specific construct.
+type Cgroup struct {
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
new file mode 100644
index 0000000..806e0be
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -0,0 +1,332 @@
+package configs
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type Rlimit struct {
+ Type int `json:"type"`
+ Hard uint64 `json:"hard"`
+ Soft uint64 `json:"soft"`
+}
+
+// IDMap represents UID/GID Mappings for User Namespaces.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+// Seccomp represents syscall restrictions
+// By default, only the native architecture of the kernel is allowed to be used
+// for syscalls. Additional architectures can be added by specifying them in
+// Architectures.
+type Seccomp struct {
+ DefaultAction Action `json:"default_action"`
+ Architectures []string `json:"architectures"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Action is taken upon rule match in Seccomp
+type Action int
+
+const (
+ Kill Action = iota + 1
+ Errno
+ Trap
+ Allow
+ Trace
+)
+
+// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
+type Operator int
+
+const (
+ EqualTo Operator = iota + 1
+ NotEqualTo
+ GreaterThan
+ GreaterThanOrEqualTo
+ LessThan
+ LessThanOrEqualTo
+ MaskEqualTo
+)
+
+// Arg is a rule to match a specific syscall argument in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"value_two"`
+ Op Operator `json:"op"`
+}
+
+// Syscall is a rule to match a syscall in Seccomp
+type Syscall struct {
+ Name string `json:"name"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+}
+
+// TODO Windows. Many of these fields should be factored out into those parts
+// which are common across platforms, and those which are platform specific.
+
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+ // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs.
+ // This is a common option when the container is running in a ramdisk.
+ NoPivotRoot bool `json:"no_pivot_root"`
+
+ // ParentDeathSignal specifies the signal that is sent to the container's process in the case
+ // that the parent process dies.
+ ParentDeathSignal int `json:"parent_death_signal"`
+
+ // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.
+ // When a custom PivotDir is not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable.
+ // This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.
+ PivotDir string `json:"pivot_dir"`
+
+ // Path to a directory containing the container's root filesystem.
+ Rootfs string `json:"rootfs"`
+
+ // Readonlyfs will remount the container's rootfs as readonly where only externally mounted
+ // bind mounts are writable.
+ Readonlyfs bool `json:"readonlyfs"`
+
+ // Specifies the mount propagation flags to be applied to /.
+ RootPropagation int `json:"rootPropagation"`
+
+ // Mounts specify additional source and destination paths that will be mounted inside the container's
+ // rootfs and mount namespace if specified
+ Mounts []*Mount `json:"mounts"`
+
+ // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+ Devices []*Device `json:"devices"`
+
+ MountLabel string `json:"mount_label"`
+
+ // Hostname optionally sets the container's hostname if provided
+ Hostname string `json:"hostname"`
+
+ // Namespaces specifies the container's namespaces that it should setup when cloning the init process
+ // If a namespace is not provided that namespace is shared from the container's parent process
+ Namespaces Namespaces `json:"namespaces"`
+
+ // Capabilities specify the capabilities to keep when executing the process inside the container
+ // All capabilities not specified will be dropped from the process's capability mask
+ Capabilities []string `json:"capabilities"`
+
+ // Networks specifies the container's network setup to be created
+ Networks []*Network `json:"networks"`
+
+ // Routes can be specified to create entries in the route table as the container is started
+ Routes []*Route `json:"routes"`
+
+ // Cgroups specifies specific cgroup settings for the various subsystems that the container is
+ // placed into to limit the resources the container has available
+ Cgroups *Cgroup `json:"cgroups"`
+
+ // AppArmorProfile specifies the profile to apply to the process running in the container and is
+ // changed at the time the process is execed
+ AppArmorProfile string `json:"apparmor_profile,omitempty"`
+
+ // ProcessLabel specifies the label to apply to the process running in the container. It is
+ // commonly used by selinux
+ ProcessLabel string `json:"process_label,omitempty"`
+
+ // Rlimits specifies the resource limits, such as max open files, to set in the container
+ // If Rlimits are not set, the container will inherit rlimits from the parent process
+ Rlimits []Rlimit `json:"rlimits,omitempty"`
+
+ // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
+ // for a process. Valid values are in the range [-1000, 1000], where processes with
+ // higher scores are preferred for being killed.
+ // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
+ OomScoreAdj int `json:"oom_score_adj"`
+
+ // AdditionalGroups specifies the gids that should be added to supplementary groups
+ // in addition to those that the user belongs to.
+ AdditionalGroups []string `json:"additional_groups"`
+
+ // UidMappings is an array of User ID mappings for User Namespaces
+ UidMappings []IDMap `json:"uid_mappings"`
+
+ // GidMappings is an array of Group ID mappings for User Namespaces
+ GidMappings []IDMap `json:"gid_mappings"`
+
+ // MaskPaths specifies paths within the container's rootfs to mask over with a bind
+ // mount pointing to /dev/null as to prevent reads of the file.
+ MaskPaths []string `json:"mask_paths"`
+
+ // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
+ // so that these files prevent any writes.
+ ReadonlyPaths []string `json:"readonly_paths"`
+
+ // Sysctl is a map of properties and their values. It is the equivalent of using
+ // sysctl -w my.property.name value in Linux.
+ Sysctl map[string]string `json:"sysctl"`
+
+ // Seccomp allows actions to be taken whenever a syscall is made within the container.
+ // A number of rules are given, each having an action to be taken if a syscall matches it.
+ // A default action to be taken if no rules match is also given.
+ Seccomp *Seccomp `json:"seccomp"`
+
+ // NoNewPrivileges controls whether processes in the container can gain additional privileges.
+ NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
+
+ // Hooks are a collection of actions to perform at various container lifecycle events.
+ // CommandHooks are serialized to JSON, but other hooks are not.
+ Hooks *Hooks
+
+ // Version is the version of opencontainer specification that is supported.
+ Version string `json:"version"`
+
+ // Labels are user defined metadata that is stored in the config and populated on the state
+ Labels []string `json:"labels"`
+
+ // NoNewKeyring disables allocation of a new session keyring for the container. The
+ // caller's keyring is used in that case.
+ NoNewKeyring bool `json:"no_new_keyring"`
+}
+
+type Hooks struct {
+ // Prestart commands are executed after the container namespaces are created,
+ // but before the user supplied command is executed from init.
+ Prestart []Hook
+
+ // Poststart commands are executed after the container init process starts.
+ Poststart []Hook
+
+ // Poststop commands are executed after the container init process exits.
+ Poststop []Hook
+}
+
+func (hooks *Hooks) UnmarshalJSON(b []byte) error {
+ var state struct {
+ Prestart []CommandHook
+ Poststart []CommandHook
+ Poststop []CommandHook
+ }
+
+ if err := json.Unmarshal(b, &state); err != nil {
+ return err
+ }
+
+ deserialize := func(shooks []CommandHook) (hooks []Hook) {
+ for _, shook := range shooks {
+ hooks = append(hooks, shook)
+ }
+
+ return hooks
+ }
+
+ hooks.Prestart = deserialize(state.Prestart)
+ hooks.Poststart = deserialize(state.Poststart)
+ hooks.Poststop = deserialize(state.Poststop)
+ return nil
+}
+
+func (hooks Hooks) MarshalJSON() ([]byte, error) {
+ serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
+ for _, hook := range hooks {
+ switch chook := hook.(type) {
+ case CommandHook:
+ serializableHooks = append(serializableHooks, chook)
+ default:
+ logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
+ }
+ }
+
+ return serializableHooks
+ }
+
+ return json.Marshal(map[string]interface{}{
+ "prestart": serialize(hooks.Prestart),
+ "poststart": serialize(hooks.Poststart),
+ "poststop": serialize(hooks.Poststop),
+ })
+}
+
+// HookState is the payload provided to a hook on execution.
+type HookState struct {
+ Version string `json:"ociVersion"`
+ ID string `json:"id"`
+ Pid int `json:"pid"`
+ Root string `json:"root"`
+ BundlePath string `json:"bundlePath"`
+}
+
+type Hook interface {
+ // Run executes the hook with the provided state.
+ Run(HookState) error
+}
+
+// NewFunctionHook will call the provided function when the hook is run.
+func NewFunctionHook(f func(HookState) error) FuncHook {
+ return FuncHook{
+ run: f,
+ }
+}
+
+type FuncHook struct {
+ run func(HookState) error
+}
+
+func (f FuncHook) Run(s HookState) error {
+ return f.run(s)
+}
+
+type Command struct {
+ Path string `json:"path"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Dir string `json:"dir"`
+ Timeout *time.Duration `json:"timeout"`
+}
+
+// NewCommandHook will execute the provided command when the hook is run.
+func NewCommandHook(cmd Command) CommandHook {
+ return CommandHook{
+ Command: cmd,
+ }
+}
+
+type CommandHook struct {
+ Command
+}
+
+func (c Command) Run(s HookState) error {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ cmd := exec.Cmd{
+ Path: c.Path,
+ Args: c.Args,
+ Env: c.Env,
+ Stdin: bytes.NewReader(b),
+ }
+ errC := make(chan error, 1)
+ go func() {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ err = fmt.Errorf("%s: %s", err, out)
+ }
+ errC <- err
+ }()
+ if c.Timeout != nil {
+ select {
+ case err := <-errC:
+ return err
+ case <-time.After(*c.Timeout):
+ cmd.Process.Kill()
+ cmd.Wait()
+ return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
+ }
+ }
+ return <-errC
+}
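The hook types above compose as follows; a minimal sketch, assuming the vendored package is imported by its canonical path github.com/opencontainers/runc/libcontainer/configs, with illustrative paths and timeout values:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	timeout := 5 * time.Second
	hooks := configs.Hooks{
		Prestart: []configs.Hook{
			// A CommandHook survives Hooks.MarshalJSON.
			configs.NewCommandHook(configs.Command{
				Path:    "/usr/bin/env",
				Args:    []string{"env"},
				Timeout: &timeout,
			}),
			// A FuncHook is skipped (with a warning) during serialization.
			configs.NewFunctionHook(func(s configs.HookState) error {
				fmt.Println("prestart for container", s.ID)
				return nil
			}),
		},
	}
	b, _ := json.Marshal(hooks) // only the CommandHook is emitted
	fmt.Println(string(b))
}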
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
new file mode 100644
index 0000000..a60554a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
@@ -0,0 +1,51 @@
+// +build freebsd linux
+
+package configs
+
+import "fmt"
+
+// HostUID gets the root uid for the process on the host, which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostUID() (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.UidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no user mappings found.")
+ }
+ id, found := c.hostIDFromMapping(0, c.UidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no root user mapping found.")
+ }
+ return id, nil
+ }
+ // Return default root uid 0
+ return 0, nil
+}
+
+// HostGID gets the root gid for the process on the host, which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostGID() (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.GidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(0, c.GidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no root group mapping found.")
+ }
+ return id, nil
+ }
+ // Return default root gid 0
+ return 0, nil
+}
+
+// Utility function that gets a host ID for a container ID from user namespace map
+// if that ID is present in the map.
+func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
+ for _, m := range uMap {
+ if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (containerID - m.ContainerID)
+ return hostID, true
+ }
+ }
+ return -1, false
+}
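A short sketch (linux/freebsd only) of HostUID resolving container root through a UID mapping; the mapping values below are hypothetical:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	cfg := configs.Config{
		Namespaces: configs.Namespaces{{Type: configs.NEWUSER}},
		UidMappings: []configs.IDMap{
			{ContainerID: 0, HostID: 100000, Size: 65536},
		},
	}
	uid, err := cfg.HostUID() // container root maps to host uid 100000
	fmt.Println(uid, err)
}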
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
new file mode 100644
index 0000000..8701bb2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
@@ -0,0 +1,57 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ Wildcard = -1
+)
+
+// TODO Windows: This can be factored out in the future
+
+type Device struct {
+ // Device type, block, char, etc.
+ Type rune `json:"type"`
+
+ // Path to the device.
+ Path string `json:"path"`
+
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+
+ // Cgroup permissions format, rwm.
+ Permissions string `json:"permissions"`
+
+ // FileMode permission bits for the device.
+ FileMode os.FileMode `json:"file_mode"`
+
+ // Uid of the device.
+ Uid uint32 `json:"uid"`
+
+ // Gid of the device.
+ Gid uint32 `json:"gid"`
+
+ // Write the file to the allowed list
+ Allow bool `json:"allow"`
+}
+
+func (d *Device) CgroupString() string {
+ return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
+}
+
+func (d *Device) Mkdev() int {
+ return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
+}
+
+// deviceNumberString converts the device number to its string form, returning "*" for Wildcard.
+func deviceNumberString(number int64) string {
+ if number == Wildcard {
+ return "*"
+ }
+ return fmt.Sprint(number)
+}
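For illustration, CgroupString renders the device rule format consumed by the devices cgroup; the /dev/null numbers match the defaults defined in device_defaults.go below:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	null := &configs.Device{
		Type:        'c',
		Path:        "/dev/null",
		Major:       1,
		Minor:       3,
		Permissions: "rwm",
		FileMode:    0666,
	}
	fmt.Println(null.CgroupString()) // "c 1:3 rwm"

	anyBlock := &configs.Device{
		Type:        'b',
		Major:       configs.Wildcard,
		Minor:       configs.Wildcard,
		Permissions: "m",
	}
	fmt.Println(anyBlock.CgroupString()) // "b *:* m"
}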
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
new file mode 100644
index 0000000..ba1f437
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
@@ -0,0 +1,125 @@
+// +build linux freebsd
+
+package configs
+
+var (
+ // DefaultSimpleDevices are devices that are to be both allowed and created.
+ DefaultSimpleDevices = []*Device{
+ // /dev/null and zero
+ {
+ Path: "/dev/null",
+ Type: 'c',
+ Major: 1,
+ Minor: 3,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/zero",
+ Type: 'c',
+ Major: 1,
+ Minor: 5,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ {
+ Path: "/dev/full",
+ Type: 'c',
+ Major: 1,
+ Minor: 7,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // consoles and ttys
+ {
+ Path: "/dev/tty",
+ Type: 'c',
+ Major: 5,
+ Minor: 0,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // /dev/urandom,/dev/random
+ {
+ Path: "/dev/urandom",
+ Type: 'c',
+ Major: 1,
+ Minor: 9,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/random",
+ Type: 'c',
+ Major: 1,
+ Minor: 8,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ }
+ DefaultAllowedDevices = append([]*Device{
+ // allow mknod for any device
+ {
+ Type: 'c',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+ {
+ Type: 'b',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+
+ {
+ Path: "/dev/console",
+ Type: 'c',
+ Major: 5,
+ Minor: 1,
+ Permissions: "rwm",
+ },
+ // /dev/pts/ - pts namespaces are "coming soon"
+ {
+ Path: "",
+ Type: 'c',
+ Major: 136,
+ Minor: Wildcard,
+ Permissions: "rwm",
+ },
+ {
+ Path: "",
+ Type: 'c',
+ Major: 5,
+ Minor: 2,
+ Permissions: "rwm",
+ },
+
+ // tuntap
+ {
+ Path: "",
+ Type: 'c',
+ Major: 10,
+ Minor: 200,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+ DefaultAutoCreatedDevices = append([]*Device{
+ {
+ // /dev/fuse is created but not allowed.
+ // This is to allow java to work, because java
+ // insists on there being a /dev/fuse
+ // https://github.com/docker/docker/issues/514
+ // https://github.com/docker/docker/issues/2393
+ //
+ Path: "/dev/fuse",
+ Type: 'c',
+ Major: 10,
+ Minor: 229,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+)
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
new file mode 100644
index 0000000..d302163
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
@@ -0,0 +1,9 @@
+package configs
+
+type HugepageLimit struct {
+ // which type of hugepage to limit.
+ Pagesize string `json:"page_size"`
+
+ // usage limit for hugepage.
+ Limit uint64 `json:"limit"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
new file mode 100644
index 0000000..9a0395e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
@@ -0,0 +1,14 @@
+package configs
+
+import (
+ "fmt"
+)
+
+type IfPrioMap struct {
+ Interface string `json:"interface"`
+ Priority int64 `json:"priority"`
+}
+
+func (i *IfPrioMap) CgroupString() string {
+ return fmt.Sprintf("%s %d", i.Interface, i.Priority)
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
new file mode 100644
index 0000000..cc770c9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
@@ -0,0 +1,30 @@
+package configs
+
+type Mount struct {
+ // Source path for the mount.
+ Source string `json:"source"`
+
+ // Destination path for the mount inside the container.
+ Destination string `json:"destination"`
+
+ // Device the mount is for.
+ Device string `json:"device"`
+
+ // Mount flags.
+ Flags int `json:"flags"`
+
+ // Propagation Flags
+ PropagationFlags []int `json:"propagation_flags"`
+
+ // Mount data applied to the mount.
+ Data string `json:"data"`
+
+ // Relabel source if set, "z" indicates shared, "Z" indicates unshared.
+ Relabel string `json:"relabel"`
+
+ // Optional Command to be run before Source is mounted.
+ PremountCmds []Command `json:"premount_cmds"`
+
+ // Optional Command to be run after Source is mounted.
+ PostmountCmds []Command `json:"postmount_cmds"`
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
new file mode 100644
index 0000000..a3329a3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
@@ -0,0 +1,5 @@
+package configs
+
+type NamespaceType string
+
+type Namespaces []Namespace
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 0000000..fb4b852
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package configs
+
+import "syscall"
+
+func (n *Namespace) Syscall() int {
+ return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+ NEWNET: syscall.CLONE_NEWNET,
+ NEWNS: syscall.CLONE_NEWNS,
+ NEWUSER: syscall.CLONE_NEWUSER,
+ NEWIPC: syscall.CLONE_NEWIPC,
+ NEWUTS: syscall.CLONE_NEWUTS,
+ NEWPID: syscall.CLONE_NEWPID,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ var flag int
+ for _, v := range *n {
+ if v.Path != "" {
+ continue
+ }
+ flag |= namespaceInfo[v.Type]
+ }
+ return uintptr(flag)
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 0000000..0547223
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows
+
+package configs
+
+func (n *Namespace) Syscall() int {
+ panic("No namespace syscall support")
+ return 0
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ panic("No namespace syscall support")
+ return uintptr(0)
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
new file mode 100644
index 0000000..b9c820d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
@@ -0,0 +1,127 @@
+// +build linux freebsd
+
+package configs
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+const (
+ NEWNET NamespaceType = "NEWNET"
+ NEWPID NamespaceType = "NEWPID"
+ NEWNS NamespaceType = "NEWNS"
+ NEWUTS NamespaceType = "NEWUTS"
+ NEWIPC NamespaceType = "NEWIPC"
+ NEWUSER NamespaceType = "NEWUSER"
+)
+
+var (
+ nsLock sync.Mutex
+ supportedNamespaces = make(map[NamespaceType]bool)
+)
+
+// nsToFile converts the namespace type to its filename
+func nsToFile(ns NamespaceType) string {
+ switch ns {
+ case NEWNET:
+ return "net"
+ case NEWNS:
+ return "mnt"
+ case NEWPID:
+ return "pid"
+ case NEWIPC:
+ return "ipc"
+ case NEWUSER:
+ return "user"
+ case NEWUTS:
+ return "uts"
+ }
+ return ""
+}
+
+// IsNamespaceSupported returns whether a namespace is available or
+// not
+func IsNamespaceSupported(ns NamespaceType) bool {
+ nsLock.Lock()
+ defer nsLock.Unlock()
+ supported, ok := supportedNamespaces[ns]
+ if ok {
+ return supported
+ }
+ nsFile := nsToFile(ns)
+ // if the namespace type is unknown, just return false
+ if nsFile == "" {
+ return false
+ }
+ _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
+ // a namespace is supported if it exists and we have permissions to read it
+ supported = err == nil
+ supportedNamespaces[ns] = supported
+ return supported
+}
+
+func NamespaceTypes() []NamespaceType {
+ return []NamespaceType{
+ NEWNET,
+ NEWPID,
+ NEWNS,
+ NEWUTS,
+ NEWIPC,
+ NEWUSER,
+ }
+}
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+ Type NamespaceType `json:"type"`
+ Path string `json:"path"`
+}
+
+func (n *Namespace) GetPath(pid int) string {
+ if n.Path != "" {
+ return n.Path
+ }
+ return fmt.Sprintf("/proc/%d/ns/%s", pid, nsToFile(n.Type))
+}
+
+func (n *Namespaces) Remove(t NamespaceType) bool {
+ i := n.index(t)
+ if i == -1 {
+ return false
+ }
+ *n = append((*n)[:i], (*n)[i+1:]...)
+ return true
+}
+
+func (n *Namespaces) Add(t NamespaceType, path string) {
+ i := n.index(t)
+ if i == -1 {
+ *n = append(*n, Namespace{Type: t, Path: path})
+ return
+ }
+ (*n)[i].Path = path
+}
+
+func (n *Namespaces) index(t NamespaceType) int {
+ for i, ns := range *n {
+ if ns.Type == t {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n *Namespaces) Contains(t NamespaceType) bool {
+ return n.index(t) != -1
+}
+
+func (n *Namespaces) PathOf(t NamespaceType) string {
+ i := n.index(t)
+ if i == -1 {
+ return ""
+ }
+ return (*n)[i].Path
+}
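A brief sketch of the Namespaces helpers above (with CloneFlags from namespaces_syscall.go noted in a comment); the netns path is hypothetical:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	var ns configs.Namespaces
	ns.Add(configs.NEWPID, "")                    // create a new pid namespace
	ns.Add(configs.NEWNET, "/var/run/netns/demo") // join an existing netns via setns
	fmt.Println(ns.Contains(configs.NEWNET))      // true
	fmt.Println(ns.PathOf(configs.NEWNET))        // "/var/run/netns/demo"
	fmt.Println(configs.IsNamespaceSupported(configs.NEWUSER))
	// On Linux, ns.CloneFlags() would include CLONE_NEWPID but not CLONE_NEWNET,
	// because namespaces with an explicit path are joined rather than created.
}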
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
new file mode 100644
index 0000000..9a74033
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!freebsd
+
+package configs
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
new file mode 100644
index 0000000..ccdb228
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
@@ -0,0 +1,72 @@
+package configs
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container causing the
+// container to be setup with the host's networking stack
+type Network struct {
+ // Type sets the networks type, commonly veth and loopback
+ Type string `json:"type"`
+
+ // Name of the network interface
+ Name string `json:"name"`
+
+ // The bridge to use.
+ Bridge string `json:"bridge"`
+
+ // MacAddress contains the MAC address to set on the network interface
+ MacAddress string `json:"mac_address"`
+
+ // Address contains the IPv4 and mask to set on the network interface
+ Address string `json:"address"`
+
+ // Gateway sets the gateway address that is used as the default for the interface
+ Gateway string `json:"gateway"`
+
+ // IPv6Address contains the IPv6 and mask to set on the network interface
+ IPv6Address string `json:"ipv6_address"`
+
+ // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
+ IPv6Gateway string `json:"ipv6_gateway"`
+
+ // Mtu sets the mtu value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ Mtu int `json:"mtu"`
+
+ // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ TxQueueLen int `json:"txqueuelen"`
+
+ // HostInterfaceName is a unique name for the host-side interface of the veth pair created for
+ // the container.
+ HostInterfaceName string `json:"host_interface_name"`
+
+ // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ // bridge port in the case of type veth
+ // Note: This is unsupported on some systems.
+ // Note: This does not apply to loopback interfaces.
+ HairpinMode bool `json:"hairpin_mode"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table. For IPv4 for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0(or *) when viewed in the route table.
+type Route struct {
+ // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Destination string `json:"destination"`
+
+ // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Source string `json:"source"`
+
+ // Sets the gateway. Accepts IPv4 and IPv6
+ Gateway string `json:"gateway"`
+
+ // The device to set this route up for, for example: eth0
+ InterfaceName string `json:"interface_name"`
+}
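An illustrative Network/Route pair for a veth setup; every address and interface name here is a made-up example, not something mandated by the structs:

package main

import "github.com/opencontainers/runc/libcontainer/configs"

func main() {
	veth := &configs.Network{
		Type:              "veth",
		Name:              "eth0",
		Bridge:            "docker0",
		Address:           "172.17.0.2/16",
		Gateway:           "172.17.0.1",
		Mtu:               1500,
		HostInterfaceName: "veth0abc123",
		HairpinMode:       true,
	}
	defaultRoute := &configs.Route{
		Destination:   "0.0.0.0/0",
		Gateway:       "172.17.0.1",
		InterfaceName: "eth0",
	}
	_, _ = veth, defaultRoute
}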
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
new file mode 100644
index 0000000..1afc52b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
@@ -0,0 +1,143 @@
+// +build linux
+
+package system
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+ "unsafe"
+)
+
+// If arg2 is nonzero, set the "child subreaper" attribute of the
+// calling process; if arg2 is zero, unset the attribute. When a
+// process is marked as a child subreaper, all of the children
+// that it creates, and their descendants, will be marked as
+// having a subreaper. In effect, a subreaper fulfills the role
+// of init(1) for its descendant processes. Upon termination of
+// a process that is orphaned (i.e., its immediate parent has
+// already terminated) and marked as having a subreaper, the
+// nearest still living ancestor subreaper will receive a SIGCHLD
+// signal and be able to wait(2) on the process to discover its
+// termination status.
+const PR_SET_CHILD_SUBREAPER = 36
+
+type ParentDeathSignal int
+
+func (p ParentDeathSignal) Restore() error {
+ if p == 0 {
+ return nil
+ }
+ current, err := GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ if p == current {
+ return nil
+ }
+ return p.Set()
+}
+
+func (p ParentDeathSignal) Set() error {
+ return SetParentDeathSignal(uintptr(p))
+}
+
+func Execv(cmd string, args []string, env []string) error {
+ name, err := exec.LookPath(cmd)
+ if err != nil {
+ return err
+ }
+
+ return syscall.Exec(name, args, env)
+}
+
+func Prlimit(pid, resource int, limit syscall.Rlimit) error {
+ _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func SetParentDeathSignal(sig uintptr) error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func GetParentDeathSignal() (ParentDeathSignal, error) {
+ var sig int
+ _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
+ if err != 0 {
+ return -1, err
+ }
+ return ParentDeathSignal(sig), nil
+}
+
+func SetKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func ClearKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func Setctty() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// RunningInUserNS detects whether we are currently running in a user namespace.
+// Copied from github.com/lxc/lxd/shared/util.go
+func RunningInUserNS() bool {
+ file, err := os.Open("/proc/self/uid_map")
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return false
+ }
+ defer file.Close()
+
+ buf := bufio.NewReader(file)
+ l, _, err := buf.ReadLine()
+ if err != nil {
+ return false
+ }
+
+ line := string(l)
+ var a, b, c int64
+ fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if a == 0 && b == 0 && c == 4294967295 {
+ return false
+ }
+ return true
+}
+
+// SetSubreaper sets the value i as the subreaper setting for the calling process
+func SetSubreaper(i int) error {
+ return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
+}
+
+func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
+ _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
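A minimal, Linux-only sketch of the prctl helpers above, assuming the package is imported as github.com/opencontainers/runc/libcontainer/system:

// +build linux

package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runc/libcontainer/system"
)

func main() {
	// Mark this process as a child subreaper (PR_SET_CHILD_SUBREAPER) so that
	// re-parented descendants can be reaped here instead of by init.
	if err := system.SetSubreaper(1); err != nil {
		log.Fatal(err)
	}
	fmt.Println("running in a user namespace:", system.RunningInUserNS())
}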
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
new file mode 100644
index 0000000..37808a2
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// look in /proc to find the process start time so that we can verify
+// that this pid has started after ourselves
+func GetProcessStartTime(pid int) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return "", err
+ }
+
+ parts := strings.Split(string(data), " ")
+ // the starttime is located at pos 22
+ // from the man page
+ //
+ // starttime %llu (was %lu before Linux 2.6)
+ // (22) The time the process started after system boot. In kernels before Linux 2.6, this
+ // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
+ // (divide by sysconf(_SC_CLK_TCK)).
+ return parts[22-1], nil // starts at 1
+}
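A small sketch of GetProcessStartTime on a Linux host, reading the current process's start time (in clock ticks since boot) from /proc:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/opencontainers/runc/libcontainer/system"
)

func main() {
	start, err := system.GetProcessStartTime(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("start time (clock ticks):", start)
}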
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
new file mode 100644
index 0000000..615ff4c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
@@ -0,0 +1,40 @@
+package system
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and architectures.
+// We are declaring the macro here because the SETNS syscall does not exist in the stdlib
+var setNsMap = map[string]uintptr{
+ "linux/386": 346,
+ "linux/arm64": 268,
+ "linux/amd64": 308,
+ "linux/arm": 375,
+ "linux/ppc": 350,
+ "linux/ppc64": 350,
+ "linux/ppc64le": 350,
+ "linux/s390x": 339,
+}
+
+var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+
+func SysSetns() uint32 {
+ return uint32(sysSetns)
+}
+
+func Setns(fd uintptr, flags uintptr) error {
+ ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+ if !exists {
+ return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+ _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
new file mode 100644
index 0000000..c990065
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
@@ -0,0 +1,25 @@
+// +build linux,386
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
new file mode 100644
index 0000000..0816bf8
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -0,0 +1,25 @@
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
new file mode 100644
index 0000000..3f780f3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
@@ -0,0 +1,25 @@
+// +build linux,arm
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
new file mode 100644
index 0000000..b3a07cb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
@@ -0,0 +1,12 @@
+// +build cgo,linux cgo,freebsd
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+func GetClockTicks() int {
+ return int(C.sysconf(C._SC_CLK_TCK))
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
new file mode 100644
index 0000000..d93b5d5
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
@@ -0,0 +1,15 @@
+// +build !cgo windows
+
+package system
+
+func GetClockTicks() int {
+ // TODO figure out a better alternative for platforms where we're missing cgo
+ //
+ // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
+ //
+ // An example of its usage can be found here.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
+
+ return 100
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
new file mode 100644
index 0000000..e7cfd62
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package system
+
+// RunningInUserNS is a stub for non-Linux systems
+// Always returns false
+func RunningInUserNS() bool {
+ return false
+}
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
new file mode 100644
index 0000000..30f74df
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
@@ -0,0 +1,99 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _zero uintptr
+
+// Llistxattr returns the size of the xattr list for path and a nil error on success.
+// It takes the path and an allocated []byte (or nil) as the destination buffer.
+func Llistxattr(path string, dest []byte) (size int, err error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return -1, err
+ }
+ var newpathBytes unsafe.Pointer
+ if len(dest) > 0 {
+ newpathBytes = unsafe.Pointer(&dest[0])
+ } else {
+ newpathBytes = unsafe.Pointer(&_zero)
+ }
+
+ _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0)
+ size = int(_size)
+ if errno != 0 {
+ return -1, errno
+ }
+
+ return size, nil
+}
+
+// Lgetxattr returns the value of the named xattr as a []byte slice if it is set, and nil otherwise.
+// It takes the path and the attribute name as arguments.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start with a 128 length byte array
+ sz = 128
+ dest := make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+
+ switch {
+ case errno == syscall.ENODATA:
+ return nil, errno
+ case errno == syscall.ENOTSUP:
+ return nil, errno
+ case errno == syscall.ERANGE:
+ // The 128 byte buffer might not be large enough.
+ // A nil destination buffer (size 0) is used to query the real size
+ // of the xattr on disk.
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
+ sz = int(_sz)
+ if sz < 0 {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno != 0 {
+ return nil, errno
+ }
+ case errno != 0:
+ return nil, errno
+ }
+ sz = int(_sz)
+ return dest[:sz], nil
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
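A Linux-only round trip through the xattr wrappers above; the file path and attribute name are hypothetical, and the file is assumed to exist on a filesystem that supports user.* attributes:

package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runc/libcontainer/system"
)

func main() {
	const path = "/tmp/xattr-demo" // assumed to exist
	if err := system.Lsetxattr(path, "user.demo", []byte("hello"), 0); err != nil {
		log.Fatal(err)
	}
	val, err := system.Lgetxattr(path, "user.demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user.demo=%s\n", val)

	// Query the required buffer size first, then fetch the attribute names.
	size, err := system.Llistxattr(path, nil)
	if err != nil {
		log.Fatal(err)
	}
	names := make([]byte, size)
	if _, err := system.Llistxattr(path, names); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("xattr list: %q\n", names)
}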
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
new file mode 100644
index 0000000..3466bfc
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -0,0 +1,121 @@
+package utils
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+)
+
+const (
+ exitSignalOffset = 128
+)
+
+// GenerateRandomName returns a new name joined with a prefix. The specified size
+// is used to truncate the randomly generated value (capped at 64 hex characters).
+func GenerateRandomName(prefix string, size int) (string, error) {
+ id := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, id); err != nil {
+ return "", err
+ }
+ if size > 64 {
+ size = 64
+ }
+ return prefix + hex.EncodeToString(id)[:size], nil
+}
+
+// ResolveRootfs ensures that the current working directory is
+// not a symlink and returns the absolute path to the rootfs
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+ rootfs, err := filepath.Abs(uncleanRootfs)
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(rootfs)
+}
+
+// ExitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func ExitStatus(status syscall.WaitStatus) int {
+ if status.Signaled() {
+ return exitSignalOffset + int(status.Signal())
+ }
+ return status.ExitStatus()
+}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
+
+// CleanPath makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always resolve to lexically
+// be a subdirectory of the prefixed path. This is all done lexically, so paths
+// that include symlinks won't be safe as a result of using CleanPath.
+func CleanPath(path string) string {
+ // Deal with empty strings nicely.
+ if path == "" {
+ return ""
+ }
+
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../../<etc>/some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
+// SearchLabels searches a list of key-value pairs for the provided key and
+// returns the corresponding value. The pairs must be separated with '='.
+func SearchLabels(labels []string, query string) string {
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == query {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+// Annotations returns the bundle path and user defined annotations from the
+// libcontainer state. We need to remove the bundle because that is a label
+// added by libcontainer.
+func Annotations(labels []string) (bundle string, userAnnotations map[string]string) {
+ userAnnotations = make(map[string]string)
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == "bundle" {
+ bundle = parts[1]
+ } else {
+ userAnnotations[parts[0]] = parts[1]
+ }
+ }
+ return
+}
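A few illustrative calls to the helpers above; the label values are made up:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	// Lexically confine a hostile relative path; ".." segments cannot escape.
	fmt.Println(utils.CleanPath("../../etc/passwd")) // "etc/passwd"

	labels := []string{"bundle=/run/containers/demo", "team=infra"}
	fmt.Println(utils.SearchLabels(labels, "team")) // "infra"

	bundle, annotations := utils.Annotations(labels)
	fmt.Println(bundle)      // "/run/containers/demo"
	fmt.Println(annotations) // map[team:infra]

	name, _ := utils.GenerateRandomName("kube2msb-", 8)
	fmt.Println(name) // prefix followed by 8 random hex characters
}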
diff --git a/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
new file mode 100644
index 0000000..408918f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -0,0 +1,33 @@
+// +build !windows
+
+package utils
+
+import (
+ "io/ioutil"
+ "strconv"
+ "syscall"
+)
+
+func CloseExecFrom(minFd int) error {
+ fdList, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return err
+ }
+ for _, fi := range fdList {
+ fd, err := strconv.Atoi(fi.Name())
+ if err != nil {
+ // ignore non-numeric file names
+ continue
+ }
+
+ if fd < minFd {
+ // ignore descriptors lower than our specified minimum
+ continue
+ }
+
+ // intentionally ignore errors from syscall.CloseOnExec
+ syscall.CloseOnExec(fd)
+ // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS b/src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b382a04
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman <borman@google.com>
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/LICENSE b/src/kube2msb/vendor/github.com/pborman/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/dce.go b/src/kube2msb/vendor/github.com/pborman/uuid/dce.go
new file mode 100644
index 0000000..50a0f2d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
+func (uuid UUID) Id() (uint32, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
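A short sketch of the Version 2 (DCE Security) helpers above, assuming the canonical import path github.com/pborman/uuid:

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewDCEPerson() // Person domain, id taken from os.Getuid()
	if d, ok := u.Domain(); ok {
		fmt.Println("domain:", d) // "Person"
	}
	if id, ok := u.Id(); ok {
		fmt.Println("id:", id)
	}
}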
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/doc.go b/src/kube2msb/vendor/github.com/pborman/uuid/doc.go
new file mode 100644
index 0000000..d8bd013
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/hash.go b/src/kube2msb/vendor/github.com/pborman/uuid/hash.go
new file mode 100644
index 0000000..cdd4192
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+ NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NIL = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space)
+ h.Write([]byte(data))
+ s := h.Sum(nil)
+ uuid := make([]byte, 16)
+ copy(uuid, s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
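Name-based UUIDs built with the helpers above are deterministic for a given namespace and name; a quick sketch:

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	a := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.org"))
	b := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.org"))
	fmt.Println(a)                        // a Version 5 UUID
	fmt.Println(a.String() == b.String()) // true: same namespace and name
	fmt.Println(uuid.NewMD5(uuid.NameSpace_URL, []byte("https://example.org"))) // Version 3
}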
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/json.go b/src/kube2msb/vendor/github.com/pborman/uuid/json.go
new file mode 100644
index 0000000..760580a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/json.go
@@ -0,0 +1,30 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "errors"
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+ if len(u) == 0 {
+ return []byte(`""`), nil
+ }
+ return []byte(`"` + u.String() + `"`), nil
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ if len(data) == 0 || string(data) == `""` {
+ return nil
+ }
+ if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("invalid UUID format")
+ }
+ data = data[1 : len(data)-1]
+ uu := Parse(string(data))
+ if uu == nil {
+ return errors.New("invalid UUID format")
+ }
+ *u = uu
+ return nil
+}
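A JSON round trip using the marshaling above, with a made-up record type for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/pborman/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	in := record{ID: uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")}
	b, err := json.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"id":"6ba7b810-9dad-11d1-80b4-00c04fd430c8"}

	var out record
	if err := json.Unmarshal(b, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.ID)
}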
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/node.go b/src/kube2msb/vendor/github.com/pborman/uuid/node.go
new file mode 100644
index 0000000..dd0a8ac
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/node.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "net"
+
+var (
+ interfaces []net.Interface // cached list of interfaces
+ ifname string // name of interface being used
+ nodeID []byte // hardware for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil && name != "" {
+ return false
+ }
+ }
+
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ if setNodeID(ifs.HardwareAddr) {
+ ifname = ifs.Name
+ return true
+ }
+ }
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ randomBits(nodeID)
+ return true
+ }
+ return false
+}
+
+// NodeID returns a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+ nid := make([]byte, 6)
+ copy(nid, nodeID)
+ return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if setNodeID(id) {
+ ifname = "user"
+ return true
+ }
+ return false
+}
+
+func setNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ copy(nodeID, id)
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/time.go b/src/kube2msb/vendor/github.com/pborman/uuid/time.go
new file mode 100644
index 0000000..7ebc9be
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ mu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer mu.Unlock()
+ mu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer mu.Unlock()
+ mu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer mu.Unlock()
+ mu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
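+
+// Example (illustrative sketch, assuming this package is imported as "uuid"
+// and the standard fmt and time packages are available): GetTime yields 100ns
+// ticks since 15 Oct 1582, which UnixTime converts back to the Unix epoch.
+//
+//	t, seq, err := uuid.GetTime()
+//	if err == nil {
+//		sec, nsec := t.UnixTime()
+//		fmt.Println(time.Unix(sec, nsec), seq)
+//	}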
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/util.go b/src/kube2msb/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 0000000..de40b10
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or to 255 if it is
+// not a hexadecimal digit.
+var xvalues = []byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hexadecimal characters of x into a byte.
+func xtob(x string) (byte, bool) {
+ b1 := xvalues[x[0]]
+ b2 := xvalues[x[1]]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
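+
+// Example (illustrative sketch, internal to this package): xtob decodes one
+// pair of hexadecimal characters, e.g. the first two characters of a UUID
+// string.
+//
+//	if b, ok := xtob("6b"); ok {
+//		_ = b // 0x6b
+//	}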
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/uuid.go b/src/kube2msb/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 0000000..2920fae
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,163 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUIDs version.
+type Version byte
+
+// A Variant represents a UUIDs variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+ return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+ if len(s) == 36+9 {
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return nil
+ }
+ s = s[9:]
+ } else if len(s) != 36 {
+ return nil
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return nil
+ }
+ uuid := make([]byte, 16)
+ for i, x := range []int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ if v, ok := xtob(s[x:]); !ok {
+ return nil
+ } else {
+ uuid[i] = v
+ }
+ }
+ return uuid
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+ panic("unreachable")
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
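+
+// Example (illustrative sketch, assuming this package is imported as "uuid"
+// and fmt is available): Parse accepts both the plain and the urn:uuid: forms
+// and returns nil on malformed input, so the result should be checked.
+//
+//	id := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
+//	if id != nil {
+//		fmt.Println(id.String())  // "f47ac10b-58cc-4372-a567-0e02b2c3d479"
+//		fmt.Println(id.Variant()) // RFC4122
+//	}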
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/version1.go b/src/kube2msb/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 0000000..0127eac
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time then NewUUID returns nil.
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, seq, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
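+
+// Example (illustrative sketch, assuming this package is imported as "uuid"
+// and fmt and time are available): a version 1 UUID embeds the timestamp and
+// node ID it was built from, which the accessors on UUID can recover.
+//
+//	if id := uuid.NewUUID(); id != nil {
+//		t, _ := id.Time()
+//		sec, nsec := t.UnixTime()
+//		fmt.Println(time.Unix(sec, nsec), id.NodeID())
+//	}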
diff --git a/src/kube2msb/vendor/github.com/pborman/uuid/version4.go b/src/kube2msb/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 0000000..b3d4a36
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits([]byte(uuid))
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
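+
+// Example (illustrative sketch, assuming this package is imported as "uuid"
+// and fmt is available): New is the usual entry point when only the string
+// form is needed; NewRandom returns the raw 16-byte value.
+//
+//	s := uuid.New()    // e.g. "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"
+//	u := uuid.NewRandom()
+//	v, _ := u.Version()
+//	fmt.Println(s, v)  // the string form and VERSION_4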
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE b/src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE b/src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..37e4a7d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,28 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+goautoneg
+http://bitbucket.org/ww/goautoneg
+Copyright 2011, Open Knowledge Foundation Ltd.
+See README.txt for license details.
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..81032be
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1,53 @@
+# Overview
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
+enables authors to define process-space metrics for their servers and
+expose them through a web service interface for extraction,
+aggregation, and a whole slew of other post-processing techniques.
+
+# Installing
+ $ go get github.com/prometheus/client_golang/prometheus
+
+# Example
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ indexed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "my_company",
+ Subsystem: "indexer",
+ Name: "documents_indexed",
+ Help: "The number of documents indexed.",
+ })
+ size = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "my_company",
+ Subsystem: "storage",
+ Name: "documents_total_size_bytes",
+ Help: "The total size of all documents in the storage.",
+ })
+)
+
+func main() {
+ http.Handle("/metrics", prometheus.Handler())
+
+ indexed.Inc()
+ size.Set(5)
+
+ http.ListenAndServe(":8080", nil)
+}
+
+func init() {
+ prometheus.MustRegister(indexed)
+ prometheus.MustRegister(size)
+}
+```
+
+# Documentation
+
+[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..c046880
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+//
+// The stock metrics provided by this package (like Gauge, Counter, Summary) are
+// also Collectors (which only ever collect one metric, namely itself). An
+// implementer of Collector may, however, collect multiple metrics in a
+// coordinated fashion and/or create metrics on the fly. Examples for collectors
+// already implemented in this library are the metric vectors (i.e. collection
+// of multiple instances of the same Metric but with different label values)
+// like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by Prometheus when collecting metrics. The
+ // implementation sends each collected metric via the provided channel
+ // and returns once the last metric has been sent. The descriptor of
+ // each sent metric is one of those returned by Describe. Returned
+ // metrics that share the same descriptor must differ in their variable
+ // label values. This method may be called concurrently and must
+ // therefore be implemented in a concurrency safe way. Blocking occurs
+ // at the expense of total performance of rendering all registered
+ // metrics. Ideally, Collector implementations support concurrent
+ // readers.
+ Collect(chan<- Metric)
+}
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+ self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *SelfCollector) Init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
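+
+// Example (illustrative sketch, not a prescribed pattern): a minimal custom
+// Collector that forwards one descriptor and one metric built at collect
+// time. The queue type and its Len method are assumptions made for the
+// example.
+//
+//	type queueCollector struct {
+//		lenDesc *Desc
+//		queue   interface{ Len() int }
+//	}
+//
+//	func (q *queueCollector) Describe(ch chan<- *Desc) { ch <- q.lenDesc }
+//
+//	func (q *queueCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(q.lenDesc, GaugeValue, float64(q.queue.Len()))
+//	}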
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..a2952d1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "hash/fnv"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.Init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.Init(result) // Init self-collection.
+ return result
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
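+
+// Example (illustrative sketch): counting HTTP requests partitioned by method
+// and response code. The metric and label names are assumptions made for the
+// example.
+//
+//	httpReqs := NewCounterVec(
+//		CounterOpts{Name: "http_requests_total", Help: "Handled HTTP requests."},
+//		[]string{"method", "code"},
+//	)
+//	MustRegister(httpReqs)
+//	httpReqs.WithLabelValues("GET", "200").Inc()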
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..fcde784
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,201 @@
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ h := fnv.New64a()
+ var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
+ for _, val := range labelValues {
+ b.Reset()
+ b.WriteString(val)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.id = h.Sum64()
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ h.Reset()
+ b.Reset()
+ b.WriteString(help)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ for _, labelName := range labelNames {
+ b.Reset()
+ b.WriteString(labelName)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.dimHash = h.Sum64()
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
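+
+// Example (illustrative sketch): a Desc with one variable label and one
+// constant label, as a custom Metric or Collector implementation might build
+// it. The names used are assumptions made for the example.
+//
+//	desc := NewDesc(
+//		BuildFQName("my_company", "indexer", "queue_length"),
+//		"Number of documents waiting to be indexed.",
+//		[]string{"shard"},        // variable label
+//		Labels{"version": "1.0"}, // constant label
+//	)
+//	_ = desc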
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..425fe87
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides embeddable metric primitives for servers and
+// standardized exposition of telemetry through a web services interface.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// To expose metrics registered with the Prometheus registry, an HTTP server
+// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+//
+// http.Handle("/metrics", prometheus.Handler())
+//
+// As a starting point a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// })
+// )
+//
+// func init() {
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.Inc()
+//
+// http.Handle("/metrics", prometheus.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter.
+// It also exports some stats about the HTTP usage of the /metrics
+// endpoint. (See the Handler function for more detail.)
+//
+// Two more advanced metric types are the Summary and Histogram.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// Those are all the parts needed for basic usage. Detailed documentation and
+// examples are provided below.
+//
+// Everything else this package offers is essentially for "power users" only. A
+// few pointers to "power user features":
+//
+// All the various ...Opts structs have a ConstLabels field for labels that
+// never change their value (which is only useful under special circumstances,
+// see documentation of the Opts type).
+//
+// The Untyped metric behaves like a Gauge, but signals the Prometheus server
+// not to assume anything about its type.
+//
+// Functions to fine-tune how the metric registry works: EnableCollectChecks,
+// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
+//
+// For custom metric collection, there are two entry points: Custom Metric
+// implementations and custom Collector implementations. A Metric is the
+// fundamental unit in the Prometheus data model: a sample at a point in time
+// together with its meta-data (like its fully-qualified name and any number of
+// pairs of label name and label value) that knows how to marshal itself into a
+// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
+// gets registered with the Prometheus registry and manages the collection of
+// one or more Metrics. Many parts of this package are building blocks for
+// Metrics and Collectors. Desc is the metric descriptor, actually used by all
+// metrics under the hood, and by Collectors to describe the Metrics to be
+// collected, but only to be dealt with by users if they implement their own
+// Metrics or Collectors. To create a Desc, the BuildFQName function will come
+// in handy. Other useful components for Metric and Collector implementation
+// include: LabelPairSorter to sort the DTO version of label pairs,
+// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
+// collection time, MetricVec to bundle custom Metrics into a metric vector
+// Collector, SelfCollector to make a custom Metric collect itself.
+//
+// A good example of a custom Collector is the ExpvarCollector included in this
+// package, which exports variables exported via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 0000000..0f7630d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototyping, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+ return &ExpvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
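+
+// Example (illustrative sketch): proxying a single numeric expvar into
+// Prometheus. The expvar key "open_files" and the metric name are assumptions
+// made for the example.
+//
+//	c := NewExpvarCollector(map[string]*Desc{
+//		"open_files": NewDesc(
+//			"expvar_open_files",
+//			"Number of open files, proxied from expvar.",
+//			nil, nil,
+//		),
+//	})
+//	MustRegister(c)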
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..ba8a402
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
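For orientation, a minimal usage sketch of the gauge API above (editorial example, not part of the vendored patch; the metric names, label values, and the :8080 address are illustrative):

package main

import (
	"net/http"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

// queueDepth is a GaugeVec partitioned by user and operation type, as the
// GaugeVec doc comment above suggests.
var queueDepth = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "jobs_queued",
		Help: "Number of jobs currently queued, partitioned by user and type.",
	},
	[]string{"user", "type"},
)

func main() {
	prometheus.MustRegister(queueDepth)
	// A GaugeFunc reads its value lazily at collect time.
	prometheus.MustRegister(prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{Name: "goroutines_current", Help: "Current number of goroutines."},
		func() float64 { return float64(runtime.NumGoroutine()) },
	))

	queueDepth.WithLabelValues("alice", "import").Inc()
	queueDepth.With(prometheus.Labels{"user": "bob", "type": "export"}).Add(3)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}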
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..85fa20b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,50 @@
+package prometheus
+
+import (
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
+func NewGoCollector() *goCollector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Name: "go_goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+}
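A short registration sketch (editorial example, not part of the vendored patch; the :8080 address is illustrative). Once registered, go_goroutines and go_gc_duration_seconds appear on every scrape:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Register the runtime collector once; it needs no configuration.
	prometheus.MustRegister(prometheus.NewGoCollector())
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}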
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..f98a41b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,450 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+var (
+ // DefBuckets are the default Histogram buckets. The default buckets are
+ // tailored to broadly measure the response time (in seconds) of a
+ // network service. Most likely, however, you will be required to define
+ // buckets customized to your use case.
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
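To make the two bucket helpers concrete, a tiny output sketch (editorial example with illustrative values; note that neither slice contains the implicit +Inf bucket):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Four buckets of width 5, the lowest with an upper bound of 10.
	fmt.Println(prometheus.LinearBuckets(10, 5, 4)) // [10 15 20 25]
	// Five buckets, each upper bound double the previous one.
	fmt.Println(prometheus.ExponentialBuckets(1, 2, 5)) // [1 2 4 8 16]
}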
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+ // name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.Init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ SelfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
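A minimal HistogramVec sketch (editorial example; the metric name, the handler label, and the bucket layout are assumptions, not taken from this patch):

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var reqLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency in seconds, partitioned by handler.",
		Buckets: prometheus.ExponentialBuckets(0.005, 2, 10), // 5ms up to ~2.56s
	},
	[]string{"handler"},
)

// timed wraps a handler and observes its wall-clock duration.
func timed(name string, h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		h(w, r)
		reqLatency.WithLabelValues(name).Observe(time.Since(start).Seconds())
	}
}

func main() {
	prometheus.MustRegister(reqLatency)
	http.HandleFunc("/ping", timed("ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong"))
	}))
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}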
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
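A sketch of the custom-Collector use case described in the NewConstHistogram comment above (editorial example; the collector type, metric name, and all numbers are invented for illustration): a collector that translates externally gathered statistics into a throw-away histogram on each scrape.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

type jobStatsCollector struct {
	desc *prometheus.Desc
}

func (c *jobStatsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *jobStatsCollector) Collect(ch chan<- prometheus.Metric) {
	// Pretend these numbers came from an external system; the bucket map
	// holds cumulative counts per upper bound and excludes +Inf.
	buckets := map[float64]uint64{0.1: 40, 0.5: 95, 1: 120}
	ch <- prometheus.MustNewConstHistogram(c.desc, 125, 42.7, buckets, "batch")
}

func main() {
	c := &jobStatsCollector{
		desc: prometheus.NewDesc(
			"external_job_duration_seconds",
			"Duration of jobs observed by an external system.",
			[]string{"kind"}, nil,
		),
	}
	prometheus.MustRegister(c)
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}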
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000..eabe602
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). As
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
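A usage sketch for the instrumentation helpers above (editorial example; the handler name, route, and :8080 address are assumptions):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Wrapping the handler yields http_requests_total, http_request_duration_microseconds,
	// http_request_size_bytes, and http_response_size_bytes, all with handler="api".
	http.Handle("/api", prometheus.InstrumentHandler("api", api))
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}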
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..86fd81c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Histogram, Untyped, and Summary. Users can implement their own Metric types, but that
+// should be rarely needed. See the example for SelfCollector, which is also an
+// example for a user-implemented Metric.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Implementers of custom Metric types must observe concurrency safety
+ // as reads of this metric may occur at any time, and any blocking
+ // occurs at the expense of total performance of rendering all
+ // registered metrics. Ideally Metric implementations should support
+ // concurrent readers.
+ //
+ // The Prometheus client library attempts to minimize memory allocations
+ // and will provide a pre-existing reset dto.Metric pointer. Prometheus
+ // may recycle the dto.Metric proto message, so Metric implementations
+ // should just populate the provided dto.Metric and then should not keep
+ // any reference to it.
+ //
+ // While populating dto.Metric, labels must be sorted lexicographically.
+ // (Implementers may find LabelPairSorter useful for that.)
+ Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
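A quick sketch of how the name components are joined (editorial example with illustrative values):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("app", "http", "requests_total")) // app_http_requests_total
	fmt.Println(prometheus.BuildFQName("app", "", "up"))                 // app_up
	fmt.Println(prometheus.BuildFQName("", "", ""))                      // empty: a missing name always yields ""
}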
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..d8cf0ed
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) *processCollector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) *processCollector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
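A registration sketch for the process collector (editorial example; the empty namespace keeps the default process_* metric names, and the :8080 address is illustrative):

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// On platforms without procfs the collector stays registered but exports nothing.
	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}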
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 0000000..1c33848
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by addr. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameter. instance can be left empty. You can use just host:port or ip:port
+// as url, in which case 'http://' is added automatically. You can also include
+// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "PUT")
+}
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+ r := newRegistry()
+ for _, collector := range collectors {
+ if _, err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return r.Push(job, instance, url, method)
+}
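A batch-job sketch for the push helpers above (editorial example; the job name and the pushgateway:9091 address are assumptions):

package main

import (
	"log"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_last_success_unixtime",
		Help: "Unix timestamp of the last successful batch run.",
	})
	completionTime.Set(float64(time.Now().Unix()))

	// PushCollectors pushes only the given collectors and, using PUT, replaces
	// whatever was previously pushed for the job "db_backup" (instance left empty).
	if err := prometheus.PushCollectors("db_backup", "", "pushgateway:9091", completionTime); err != nil {
		log.Fatal("could not push to Pushgateway: ", err)
	}
}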
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..5970aae
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,726 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ defRegistry = newDefaultRegistry()
+ errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
+)
+
+// Constants relevant to the HTTP interface.
+const (
+ // APIVersion is the version of the format of the exported data. This
+ // will match this library's version, which subscribes to the Semantic
+ // Versioning scheme.
+ APIVersion = "0.0.4"
+
+ // DelimitedTelemetryContentType is the content type set on telemetry
+ // data responses in delimited protobuf format.
+ DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
+ // TextTelemetryContentType is the content type set on telemetry data
+ // responses in text format.
+ TextTelemetryContentType = `text/plain; version=` + APIVersion
+ // ProtoTextTelemetryContentType is the content type set on telemetry
+ // data responses in protobuf text format. (Only used for debugging.)
+ ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
+ // ProtoCompactTextTelemetryContentType is the content type set on
+ // telemetry data responses in protobuf compact text format. (Only used
+ // for debugging.)
+ ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
+
+ // Constants for object pools.
+ numBufs = 4
+ numMetricFamilies = 1000
+ numMetrics = 10000
+
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+
+ acceptEncodingHeader = "Accept-Encoding"
+ acceptHeader = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", defRegistry)
+}
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+ return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
+func Register(m Collector) error {
+ _, err := defRegistry.Register(m)
+ return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+ err := Register(m)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+ return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet would
+// have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+ existing, err := RegisterOrGet(m)
+ if err != nil {
+ panic(err)
+ }
+ return existing
+}
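A sketch of the RegisterOrGet/MustRegisterOrGet pattern (editorial example; the counter name is an assumption): two call sites can use the same helper and end up sharing one registered collector.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func requestCounter() *prometheus.CounterVec {
	c := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "requests_total", Help: "Total requests."},
		[]string{"code"},
	)
	// If an equal collector is already registered, the existing one is returned
	// instead of an error, so repeated calls are safe (though not concurrently).
	return prometheus.MustRegisterOrGet(c).(*prometheus.CounterVec)
}

func main() {
	a := requestCounter()
	b := requestCounter()
	a.WithLabelValues("200").Inc()
	fmt.Println(a == b) // true: the second call received the collector registered first
}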
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+ return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-time checks are enabled, type and uniqueness
+// checks are performed, but no further consistency checks (which would require
+// knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the order of metrics will be sorted by the registry as
+// it is required anyway after merging with the metric families collected
+// conventionally.
+//
+// The function must be callable at any time and concurrently.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ defRegistry.metricFamilyInjectionHook = hook
+}
+
+// PanicOnCollectError sets the behavior whether a panic is caused upon an error
+// while metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+ defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+ defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ bufPool chan *bytes.Buffer
+ metricFamilyPool chan *dto.MetricFamily
+ metricPool chan *dto.Metric
+ metricFamilyInjectionHook func() []*dto.MetricFamily
+
+ panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ newDescIDs := map[uint64]struct{}{}
+ newDimHashesByName := map[string]uint64{}
+ var collectorID uint64 // Just a sum of all desc IDs.
+ var duplicateDescErr error
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return nil, errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return existing, errAlreadyReg
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return nil, duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return c, nil
+}
+
+func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
+ existing, err := r.Register(m)
+ if err != nil && err != errAlreadyReg {
+ return nil, err
+ }
+ return existing, nil
+}
+
+func (r *registry) Unregister(c Collector) bool {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ descIDs := map[uint64]struct{}{}
+ var collectorID uint64 // Just a sum of the desc IDs.
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+func (r *registry) Push(job, instance, pushURL, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
+ if instance != "" {
+ pushURL += "/instances/" + url.QueryEscape(instance)
+ }
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ return err
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 202 {
+ return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
+ }
+ return nil
+}
+
+func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ contentType := expfmt.Negotiate(req.Header)
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+}
+
+func (r *registry) writePB(encoder expfmt.Encoder) error {
+ var metricHashes map[uint64]struct{}
+ if r.collectChecksEnabled {
+ metricHashes = make(map[uint64]struct{})
+ }
+ metricChan := make(chan Metric, capMetricChan)
+ wg := sync.WaitGroup{}
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for _ = range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+		// This could be done concurrently, too, but it would require
+		// locking of metricFamiliesByName (and of metricHashes if checks
+		// are enabled). Most likely not worth it.
+ desc := metric.Desc()
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if !ok {
+ metricFamily = r.getMetricFamily()
+ defer r.giveMetricFamily(metricFamily)
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ dtoMetric := r.getMetric()
+ defer r.giveMetric(dtoMetric)
+ if err := metric.Write(dtoMetric); err != nil {
+ // TODO: Consider different means of error reporting so
+ // that a single erroneous metric could be skipped
+ // instead of blowing up the whole collection.
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ switch {
+ case metricFamily.Type != nil:
+ // Type already set. We are good.
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+
+ if r.metricFamilyInjectionHook != nil {
+ for _, mf := range r.metricFamilyInjectionHook() {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if !exists {
+ metricFamiliesByName[mf.GetName()] = mf
+ if r.collectChecksEnabled {
+ for _, m := range mf.Metric {
+ if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+ for _, m := range mf.Metric {
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+
+ // Now that MetricFamilies are all set, sort their Metrics
+ // lexicographically by their label values.
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+
+ // Write out MetricFamilies sorted by their name.
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name := range metricFamiliesByName {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := fnv.New64a()
+ var buf bytes.Buffer
+ buf.WriteString(metricFamily.GetName())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check. Label pairs must be sorted by contract. But the point of this
+ // method is to check for contract violations. So we better do the sort
+ // now.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ buf.Reset()
+ buf.WriteString(lp.GetValue())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ }
+ metricHash := h.Sum64()
+ if _, exists := metricHashes[metricHash]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ metricHashes[metricHash] = struct{}{}
+
+ if desc == nil {
+ return nil // Nothing left to check if we have no desc.
+ }
+
+ // Desc consistency with metric family.
+ if metricFamily.GetName() != desc.fqName {
+ return fmt.Errorf(
+ "collected metric %s %s has name %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
+ )
+ }
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+
+ r.mtx.RLock() // Remaining checks need the read lock.
+ defer r.mtx.RUnlock()
+
+ // Is the desc registered?
+ if _, exist := r.descIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+
+ return nil
+}
+
+func (r *registry) getBuf() *bytes.Buffer {
+ select {
+ case buf := <-r.bufPool:
+ return buf
+ default:
+ return &bytes.Buffer{}
+ }
+}
+
+func (r *registry) giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ select {
+ case r.bufPool <- buf:
+ default:
+ }
+}
+
+func (r *registry) getMetricFamily() *dto.MetricFamily {
+ select {
+ case mf := <-r.metricFamilyPool:
+ return mf
+ default:
+ return &dto.MetricFamily{}
+ }
+}
+
+func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
+ mf.Reset()
+ select {
+ case r.metricFamilyPool <- mf:
+ default:
+ }
+}
+
+func (r *registry) getMetric() *dto.Metric {
+ select {
+ case m := <-r.metricPool:
+ return m
+ default:
+ return &dto.Metric{}
+ }
+}
+
+func (r *registry) giveMetric(m *dto.Metric) {
+ m.Reset()
+ select {
+ case r.metricPool <- m:
+ default:
+ }
+}
+
+func newRegistry() *registry {
+ return &registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ bufPool: make(chan *bytes.Buffer, numBufs),
+ metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
+ metricPool: make(chan *dto.Metric, numMetrics),
+ }
+}
+
+func newDefaultRegistry() *registry {
+ r := newRegistry()
+ r.Register(NewProcessCollector(os.Getpid(), ""))
+ r.Register(NewGoCollector())
+ return r
+}
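+
+// Editor's note: an illustrative sketch (not part of the upstream file). Since
+// *registry implements http.Handler via ServeHTTP above, the default registry
+// can conceptually be exposed like this (the path and address are made-up
+// examples; the exported package API wraps this for normal use):
+//
+//	r := newDefaultRegistry()
+//	http.Handle("/metrics", r)
+//	http.ListenAndServe(":8080", nil)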
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
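+
+// Editor's note: illustrative only (not part of the upstream file). A request
+// carrying "Accept-Encoding: gzip" makes decorateWriter wrap the target in a
+// gzip.Writer and report "gzip" as the Content-Encoding, e.g.:
+//
+//	req, _ := http.NewRequest("GET", "/metrics", nil)
+//	req.Header.Set(acceptEncodingHeader, "gzip")
+//	w, enc := decorateWriter(req, &bytes.Buffer{})
+//	// enc == "gzip"; w must be closed to flush the compressed data.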
+
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are inconsistent. However,
+		// we have to deal with it, as users might create inconsistent
+		// metrics via custom collectors or metric family injection. So we
+		// simply compare the number of labels in this case, which still
+		// yields reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..fe81e00
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,540 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+var (
+ // DefObjectives are the default Summary quantile values.
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+ BufCap uint32
+}
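+
+// Editor's note: an illustrative reading of the Objectives contract above (not
+// part of the upstream file). With the default objectives,
+//
+//	opts.Objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+//
+// the value reported for the 0.99 quantile is some φ-quantile with φ in the
+// interval [0.989, 0.991], i.e. the rank estimate has an absolute error of at
+// most 0.001.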
+
+// TODO: Known problem with the sliding-window decay algorithm... The Merge
+// method of perk/quantile is actually not working as advertised - and it might
+// be unfixable, as the underlying algorithm is apparently not capable of
+// merging summaries in the first place. To avoid using Merge, we are currently
+// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
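+
+// Editor's note: an illustrative usage sketch (not part of the upstream file).
+// The metric name and help text are made-up examples.
+//
+//	requestLatency := NewSummary(SummaryOpts{
+//		Namespace: "example",
+//		Name:      "request_duration_seconds",
+//		Help:      "Request latency in seconds.",
+//	})
+//	requestLatency.Observe(0.042) // one 42ms observation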
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.Init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ SelfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ },
+ },
+ }
+}
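+
+// Editor's note: an illustrative usage sketch (not part of the upstream file).
+// The metric and label names are made-up examples.
+//
+//	latencies := NewSummaryVec(SummaryOpts{
+//		Name: "example_request_duration_seconds",
+//		Help: "Request latency in seconds, partitioned by status and method.",
+//	}, []string{"code", "method"})
+//	latencies.WithLabelValues("200", "GET").Observe(0.021)
+//	latencies.With(Labels{"code": "404", "method": "POST"}).Observe(0.003)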
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
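+
+// Editor's note: an illustrative sketch (not part of the upstream file) of how
+// a custom Collector might emit a throw-away summary from pre-aggregated data
+// inside its Collect method. exampleCollector, exampleDesc, and the numbers
+// are made-up.
+//
+//	func (c *exampleCollector) Collect(ch chan<- Metric) {
+//		s, err := NewConstSummary(
+//			c.exampleDesc,
+//			42,   // observation count
+//			12.7, // sum of observations
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//		)
+//		if err == nil {
+//			ch <- s
+//		}
+//	}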
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..c65ab1c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that
+// no type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
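+
+// Editor's note: an illustrative usage sketch (not part of the upstream file).
+// The metric name is a made-up example; the calling package would need to
+// import "runtime".
+//
+//	goroutines := NewUntypedFunc(UntypedOpts{
+//		Name: "example_goroutines",
+//		Help: "Current number of goroutines.",
+//	}, func() float64 {
+//		return float64(runtime.NumGoroutine())
+//	})
+//	_ = goroutines // register it so it gets collected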
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..b54ac11
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
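+
+// Editor's note: an illustrative sketch (not part of the upstream file) of a
+// custom Collector emitting a fixed gauge value in Collect. exampleCollector,
+// exampleDesc, and the value are made-up.
+//
+//	func (c *exampleCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(c.exampleDesc, GaugeValue, 21.5)
+//	}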
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..a1f3bdf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,247 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "sync"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects not only children, but also hash and buf.
+ children map[uint64]Metric
+ desc *Desc
+
+ // hash is our own hash instance to avoid repeated allocations.
+ hash hash.Hash64
+ // buf is used to copy string contents into it for hashing,
+ // again to avoid allocations.
+ buf bytes.Buffer
+
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It sends exactly one Desc, the one shared by
+// all metrics in this vector, to the provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metric := range m.children {
+ ch <- metric
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+ lvs := make([]string, len(labels))
+ for i, label := range m.desc.variableLabels {
+ lvs[i] = labels[label]
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, val := range vals {
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
+ metric, ok := m.children[hash]
+ if !ok {
+		// Copy labelValues only here, in the miss path, so that the copy is
+		// not allocated when the metric already exists.
+ copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
+ metric = m.newMetric(copiedLabelValues...)
+ m.children[hash] = metric
+ }
+ return metric
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE b/src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE b/src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..b065f86
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
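
A minimal sketch (not part of the vendored tree) of how the nil-safe getters generated above behave; the label and counter values are made up for illustration:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	dto "github.com/prometheus/client_model/go"
    )

    func main() {
    	m := &dto.Metric{
    		Label:   []*dto.LabelPair{{Name: proto.String("job"), Value: proto.String("api")}},
    		Counter: &dto.Counter{Value: proto.Float64(7)},
    	}
    	// Getters tolerate nil receivers and unset sub-messages, returning zero values.
    	fmt.Println(m.GetCounter().GetValue()) // 7
    	fmt.Println(m.GetGauge().GetValue())   // 0, because Gauge is unset
    	fmt.Println(m.GetLabel()[0].GetName()) // "job"
    }
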
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/LICENSE b/src/kube2msb/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/NOTICE b/src/kube2msb/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..a98696d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,433 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
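+// DecodeOptions contains options used by the Decoder and in sample extraction.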
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const (
+ textType = "text/plain"
+ jsonType = "application/json"
+ )
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+
+ case jsonType:
+ var prometheusAPIVersion string
+
+ if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
+ prometheusAPIVersion = params["version"]
+ } else {
+ prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
+ }
+
+ switch prometheusAPIVersion {
+ case "0.0.2", "":
+ return fmtJSON2
+ default:
+ return FmtUnknown
+ }
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ case fmtJSON2:
+ return newJSON2Decoder(r)
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
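+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.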
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
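
A minimal end-to-end decoding sketch (not part of the vendored tree), assuming metrics are scraped over HTTP from a placeholder URL; error handling is kept short:

    package main

    import (
    	"io"
    	"log"
    	"net/http"
    	"time"

    	"github.com/prometheus/common/expfmt"
    	"github.com/prometheus/common/model"
    )

    func main() {
    	resp, err := http.Get("http://localhost:9090/metrics") // placeholder target
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer resp.Body.Close()

    	// Pick the decoder from the response Content-Type, then lift the
    	// decoded MetricFamily messages into model samples.
    	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))
    	sdec := &expfmt.SampleDecoder{
    		Dec:  dec,
    		Opts: &expfmt.DecodeOptions{Timestamp: model.TimeFromUnixNano(time.Now().UnixNano())},
    	}

    	var all model.Vector
    	for {
    		var v model.Vector
    		if err := sdec.Decode(&v); err == io.EOF {
    			break
    		} else if err != nil {
    			log.Fatal(err)
    		}
    		all = append(all, v...) // one family's samples per Decode call
    	}
    	log.Printf("decoded %d samples", len(all))
    }
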
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..11839ed
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
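
A short serving-side sketch (not part of the vendored tree) showing Negotiate and NewEncoder together; the families slice is assumed to be produced elsewhere, for example by a collector registry:

    package main

    import (
    	"net/http"

    	dto "github.com/prometheus/client_model/go"
    	"github.com/prometheus/common/expfmt"
    )

    // serveMetrics writes the given families in whichever format the client
    // negotiated through its Accept header.
    func serveMetrics(w http.ResponseWriter, r *http.Request, families []*dto.MetricFamily) {
    	format := expfmt.Negotiate(r.Header)
    	// Format values double as Content-Type strings (see expfmt.go below).
    	w.Header().Set("Content-Type", string(format))
    	enc := expfmt.NewEncoder(w, format)
    	for _, mf := range families {
    		if err := enc.Encode(mf); err != nil {
    			http.Error(w, err.Error(), http.StatusInternalServerError)
    			return
    		}
    	}
    }

    func main() {
    	var families []*dto.MetricFamily // would normally come from instrumentation
    	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
    		serveMetrics(w, r, families)
    	})
    	_ = http.ListenAndServe(":8080", nil) // placeholder address
    }
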
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..366fbde
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
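+// Format specifies the HTTP content type of the different wire protocols.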
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+ // fmtJSON2 is hidden as it is deprecated.
+ fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..dc2eede
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go
new file mode 100644
index 0000000..cf86545
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/json_decode.go
@@ -0,0 +1,174 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
+)
+
+type json2Decoder struct {
+ dec *json.Decoder
+ fams []*dto.MetricFamily
+}
+
+func newJSON2Decoder(r io.Reader) Decoder {
+ return &json2Decoder{
+ dec: json.NewDecoder(r),
+ }
+}
+
+type histogram002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Values map[string]float64 `json:"value"`
+}
+
+type counter002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Value float64 `json:"value"`
+}
+
+func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
+ labels := base.Clone().Merge(ext)
+ delete(labels, model.MetricNameLabel)
+
+ names := make([]string, 0, len(labels))
+ for ln := range labels {
+ names = append(names, string(ln))
+ }
+ sort.Strings(names)
+
+ pairs := make([]*dto.LabelPair, 0, len(labels))
+
+ for _, ln := range names {
+ if !model.LabelNameRE.MatchString(ln) {
+ return nil, fmt.Errorf("invalid label name %q", ln)
+ }
+ lv := labels[model.LabelName(ln)]
+
+ pairs = append(pairs, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(string(lv)),
+ })
+ }
+
+ return pairs, nil
+}
+
+func (d *json2Decoder) more() error {
+ var entities []struct {
+ BaseLabels model.LabelSet `json:"baseLabels"`
+ Docstring string `json:"docstring"`
+ Metric struct {
+ Type string `json:"type"`
+ Values json.RawMessage `json:"value"`
+ } `json:"metric"`
+ }
+
+ if err := d.dec.Decode(&entities); err != nil {
+ return err
+ }
+ for _, e := range entities {
+ f := &dto.MetricFamily{
+ Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
+ Help: proto.String(e.Docstring),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{},
+ }
+
+ d.fams = append(d.fams, f)
+
+ switch e.Metric.Type {
+ case "counter", "gauge":
+ var values []counter002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, ctr := range values {
+ labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
+ if err != nil {
+ return err
+ }
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: labels,
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(ctr.Value),
+ },
+ })
+ }
+
+ case "histogram":
+ var values []histogram002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, hist := range values {
+ quants := make([]string, 0, len(values))
+ for q := range hist.Values {
+ quants = append(quants, q)
+ }
+
+ sort.Strings(quants)
+
+ for _, q := range quants {
+ value := hist.Values[q]
+ // The correct label is "quantile", but this remains "percentile"
+ // so as not to break old expressions.
+ hist.Labels["percentile"] = model.LabelValue(q)
+
+ labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
+ if err != nil {
+ return err
+ }
+
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: labels,
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(value),
+ },
+ })
+ }
+ }
+
+ default:
+ return fmt.Errorf("unknown metric type %q", e.Metric.Type)
+ }
+ }
+ return nil
+}
+
+// Decode implements the Decoder interface.
+func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
+ if len(d.fams) == 0 {
+ if err := d.more(); err != nil {
+ return err
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..0bb9c14
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,305 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+}
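
A small sketch (not part of the vendored tree) of MetricFamilyToText; the gauge below is hand-built and purely illustrative:

    package main

    import (
    	"log"
    	"os"

    	"github.com/golang/protobuf/proto"
    	dto "github.com/prometheus/client_model/go"
    	"github.com/prometheus/common/expfmt"
    )

    func main() {
    	mf := &dto.MetricFamily{
    		Name: proto.String("queue_length"),
    		Help: proto.String("Current length of the work queue."),
    		Type: dto.MetricType_GAUGE.Enum(),
    		Metric: []*dto.Metric{{
    			Label: []*dto.LabelPair{{Name: proto.String("queue"), Value: proto.String("ingest")}},
    			Gauge: &dto.Gauge{Value: proto.Float64(42)},
    		}},
    	}
    	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
    		log.Fatal(err)
    	}
    	// Prints:
    	// # HELP queue_length Current length of the work queue.
    	// # TYPE queue_length gauge
    	// queue_length{queue="ingest"} 42
    }
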
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..bd170b1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// nil value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us whether the currently processed line ends in '_count' or
+ // '_sum' and belongs to a summary/histogram, in which case it represents
+ // the sample count or sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
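+//
+// A minimal usage sketch (the input literal is illustrative only):
+//
+//	var parser TextParser
+//	families, err := parser.TextToMetricFamilies(strings.NewReader(
+//		"# TYPE requests_total counter\nrequests_total{code=\"200\"} 42\n"))
+//	// err == nil; families["requests_total"] holds one counter metric.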
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
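+
+// A minimal usage sketch, assuming the enclosing package is imported as
+// "expfmt" (the package clause is outside this hunk, so that qualifier is an
+// assumption) and that "strings", "fmt", and "log" are imported:
+//
+//	var parser expfmt.TextParser
+//	in := strings.NewReader("# TYPE http_requests_total counter\n" +
+//		"http_requests_total{code=\"200\"} 1027 1395066363000\n")
+//	families, err := parser.TextToMetricFamilies(in)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for name, mf := range families {
+//		fmt.Printf("%s: type=%s, %d metric(s)\n", name, mf.GetType(), len(mf.GetMetric()))
+//	}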
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000..648b38c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
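+
+// A minimal usage sketch of content negotiation; the header and the list of
+// alternatives are arbitrary examples:
+//
+//	header := "text/html;q=0.9, application/json, */*;q=0.1"
+//	ct := goautoneg.Negotiate(header, []string{"application/json", "text/plain"})
+//	// ct == "application/json": it matches the clause with the highest quality value.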
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/alert.go b/src/kube2msb/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..35e739c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent. It returns an error if it is not.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
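+
+// A minimal sketch of constructing and validating an alert (assumes the package
+// is imported as "model" per its vendored path; the label values are arbitrary):
+//
+//	a := &model.Alert{
+//		Labels:   model.LabelSet{model.AlertNameLabel: "HighErrorRate", "job": "api"},
+//		StartsAt: time.Now(),
+//	}
+//	if err := a.Validate(); err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(a.Name(), a.Status()) // HighErrorRate firing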
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go b/src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
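+
+// A minimal sketch of the round trip between a Fingerprint and its
+// hexadecimal string form:
+//
+//	fp := model.Fingerprint(0x0102030405060708)
+//	s := fp.String() // "0102030405060708"
+//	back, err := model.ParseFingerprint(s)
+//	// err == nil and back == fp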
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go b/src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..038fc1c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
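+
+// A minimal sketch of how these unexported helpers compose (only usable from
+// within this package); SeparatorByte is defined in signature.go:
+//
+//	h := hashNew()
+//	h = hashAdd(h, "job")
+//	h = hashAddByte(h, SeparatorByte)
+//	h = hashAdd(h, "api")
+//	// h is now the fnv64a hash of "job", a separator byte, and "api".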
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/labels.go b/src/kube2msb/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..3b72e7f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,206 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
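+
+// A minimal sketch of label name validation; the example names are arbitrary:
+//
+//	model.LabelName("http_status").IsValid()          // true
+//	model.LabelName("0_starts_with_digit").IsValid()  // false: leading digit
+//	model.LabelName("has-dash").IsValid()             // false: '-' is not allowed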
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go b/src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..5f931cd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
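+
+// A minimal sketch of the resulting order; the label sets are arbitrary:
+//
+//	a := model.LabelSet{"job": "api"}
+//	b := model.LabelSet{"instance": "host:9090", "job": "api"}
+//	c := model.LabelSet{"instance": "host:9090", "job": "db"}
+//	a.Before(b) // true: a has fewer labels
+//	b.Before(c) // true: equal size, and "api" < "db" for the "job" label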
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
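+
+// A minimal usage sketch; on conflicting names the value from the argument
+// wins, and neither operand is modified:
+//
+//	base := model.LabelSet{"job": "api"}
+//	merged := base.Merge(model.LabelSet{"job": "api-canary", "instance": "host:9090"})
+//	// merged == {"job": "api-canary", "instance": "host:9090"}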
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/metric.go b/src/kube2msb/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..a5da59a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
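+
+// A minimal sketch of the rendered form; the metric is an arbitrary example:
+//
+//	m := model.Metric{
+//		model.MetricNameLabel: "http_requests_total",
+//		"code":                "200",
+//		"method":              "get",
+//	}
+//	fmt.Println(m) // http_requests_total{code="200", method="get"}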
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/model.go b/src/kube2msb/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..88f013a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/signature.go b/src/kube2msb/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..8762b13
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
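+
+// A minimal sketch; identical label sets produce identical signatures
+// regardless of map iteration order:
+//
+//	s1 := model.LabelsToSignature(map[string]string{"a": "1", "b": "2"})
+//	s2 := model.LabelsToSignature(map[string]string{"b": "2", "a": "1"})
+//	// s1 == s2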
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more prone to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
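+
+// A minimal sketch contrasting the two selection helpers; the metric is an
+// arbitrary example:
+//
+//	m := model.Metric{"__name__": "up", "job": "api", "instance": "host:9090"}
+//	onlyJob := model.SignatureForLabels(m, "job")
+//	noInstance := model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"instance": {}})
+//	// onlyJob hashes just the "job" pair; noInstance hashes "__name__" and "job".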
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/silence.go b/src/kube2msb/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..7538e29
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher matches the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus ecosystem.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/time.go b/src/kube2msb/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..548968a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
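+// For example, "30s" parses to 30 seconds and "2d" to 48 hours.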
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/common/model/value.go b/src/kube2msb/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..dbf5d10
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,403 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+	if !s.Value.Equal(o.Value) {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md b/src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 0000000..6eb1935
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,11 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt <ts@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Tobias Schmidt <ts@soundcloud.com>
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..5705f0f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE b/src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE b/src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/README.md b/src/kube2msb/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..761d31c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,7 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/doc.go b/src/kube2msb/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..e2acd6d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/fs.go b/src/kube2msb/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..838474a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,36 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It returns an
+// error if the mount point cannot be read or is not a directory.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+func (fs FS) stat(p string) (os.FileInfo, error) {
+ return os.Stat(path.Join(string(fs), p))
+}
+
+func (fs FS) open(p string) (*os.File, error) {
+ return os.Open(path.Join(string(fs), p))
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/proc.go b/src/kube2msb/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..21445cf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,149 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process.
+func Self() (Proc, error) {
+ return NewProc(os.Getpid())
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+
+ return fs.AllProcs()
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
+ return Proc{}, err
+ }
+
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := fs.open("")
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := p.open("cmdline")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := p.open("fd")
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) open(pa string) (*os.File, error) {
+ return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..9f080b9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,111 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits.
+type ProcLimits struct {
+ CPUTime int
+ FileSize int
+ DataSize int
+ StackSize int
+ CoreFileSize int
+ ResidentSet int
+ Processes int
+ OpenFiles int
+ LockedMemory int
+ AddressSpace int
+ FileLocks int
+ PendingSignals int
+ MsqqueueSize int
+ NicePriority int
+ RealtimePriority int
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := p.open("limits")
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..30a403b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
+// required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.
+// After much research it was determined that USER_HZ is actually hardcoded to
+// 100 on all Go-supported platforms as of the time of this writing. This is
+// why we decided to hardcode it here as well. It is not impossible that there
+// could be systems with exceptions, but they should be very exotic edge cases,
+// and in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+	// The time the process started after system boot, expressed in clock
+	// ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := p.open("stat")
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the Unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/src/kube2msb/vendor/github.com/prometheus/procfs/stat.go b/src/kube2msb/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..26fefb0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := fs.open("stat")
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/LICENSE b/src/kube2msb/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/README.md b/src/kube2msb/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..e74dd50
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,256 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you
+have a FlagSet but find it difficult to keep up with all of the flag pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
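+
+As a minimal sketch (assuming `fmt` is imported and `flag.Parse()` has already
+been called), the positional arguments can be iterated like this:
+
+``` go
+for i := 0; i < flag.NArg(); i++ {
+	fmt.Println("argument", i, "=", flag.Arg(i))
+}
+```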
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
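+
+As a hypothetical sketch of the subcommand pattern (the "serve" command and its
+flag below are made up for illustration and are not part of pflag):
+
+``` go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	flag "github.com/spf13/pflag"
+)
+
+func main() {
+	// An independent flag set for a "serve" subcommand.
+	serveFlags := flag.NewFlagSet("serve", flag.ExitOnError)
+	port := serveFlags.IntP("port", "p", 8080, "port to listen on")
+
+	if len(os.Args) > 1 && os.Args[1] == "serve" {
+		// Parse only the arguments that follow the subcommand name.
+		serveFlags.Parse(os.Args[2:])
+		fmt.Println("serving on port", *port)
+	}
+}
+```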
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or a flag with a default value.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here and must not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal but will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only, and you don't want it to show up in help text or have its usage text available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/ogier/pflag
+[3]: http://godoc.org/github.com/ogier/pflag
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/bool.go b/src/kube2msb/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..d272e40
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,97 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name.
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
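+	// A NoOptDefVal of "true" lets the flag appear as a bare "--name" (or "-x") with no value and still mean true.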
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/count.go b/src/kube2msb/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..7b1f142
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,97 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ // -1 means that no specific value was passed, so increment
+ if v == -1 {
+ *i = countValue(*i + 1)
+ } else {
+ *i = countValue(v)
+ }
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return fmt.Sprintf("%v", *i) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name.
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
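+	// The "-1" NoOptDefVal is the sentinel that Set (above) interprets as "increment by one".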
+ flag.NoOptDefVal = "-1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set.
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count is like FlagSet.Count, but the flag is placed on the CommandLine instead of a given flag set.
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/duration.go b/src/kube2msb/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the time.Duration value of a flag with the given name.
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/flag.go b/src/kube2msb/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..fd91440
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,836 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/ogier/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an error from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ formal map[NormalizedName]*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ exitOnError bool // does the program exit if there's an error?
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+	Changed             bool                // true if the user set the value (rather than leaving it at the default)
+	NoOptDefVal         string              // default value (as text); used if the flag is on the command line without any value
+	Deprecated          string              // if this flag is deprecated, this string holds the deprecation message (e.g. what to use instead)
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // if the shorthand of this flag is deprecated, this string holds the deprecation message
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any name used to look up
+// a flag will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ for k, v := range f.formal {
+ delete(f.formal, k)
+ nname := f.normalizeFlagName(string(k))
+ f.formal[nname] = v
+ v.Name = string(nname)
+ }
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.formal) {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.actual) {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// getFlagType looks up the named flag, checks that its Value has the expected type, and returns its value converted via convFunc.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if len(usageMessage) == 0 {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if len(usageMessage) == 0 {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ err := flag.Value.Set(value)
+ if err != nil {
+ return err
+ }
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ flag.Changed = true
+ if len(flag.Deprecated) > 0 {
+ fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprintf(f.out(), "%s", usages)
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ x := new(bytes.Buffer)
+
+ f.VisitAll(func(flag *Flag) {
+ if len(flag.Deprecated) > 0 || flag.Hidden {
+ return
+ }
+ format := ""
+ if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+ format = " -%s, --%s"
+ } else {
+ format = " %s --%s"
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ format = format + "["
+ }
+ if flag.Value.Type() == "string" {
+ // put quotes on the value
+ format = format + "=%q"
+ } else {
+ format = format + "=%s"
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ format = format + "]"
+ }
+ format = format + ": %s\n"
+ shorthand := flag.Shorthand
+ if len(flag.ShorthandDeprecated) > 0 {
+ shorthand = ""
+ }
+ fmt.Fprintf(x, format, shorthand, flag.Name, flag.DefValue, flag.Usage)
+ })
+
+ return x.String()
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ _ = f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ // Call normalizeFlagName function only once
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadythere := f.formal[normalizedFlagName]
+ if alreadythere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+
+ if len(flag.Shorthand) == 0 {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+		fmt.Fprintf(f.out(), "%s shorthand is more than one ASCII character: %s\n", f.name, flag.Shorthand)
+ panic("shorthand is more than one character")
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ old, alreadythere := f.shorthands[c]
+ if alreadythere {
+ fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
+ panic("shorthand redefinition")
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
+ if err := flag.Value.Set(value); err != nil {
+ return f.failf("invalid argument %q for %s: %v", value, origArg, err)
+ }
+ // mark as visited for Visit()
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[f.normalizeFlagName(flag.Name)] = flag
+ flag.Changed = true
+ if len(flag.Deprecated) > 0 {
+ fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
+ fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+ return nil
+}
+
+func containsShorthand(arg, shorthand string) bool {
+ // filter out flags --<flag_name>
+ if strings.HasPrefix(arg, "-") {
+ return false
+ }
+ arg = strings.SplitN(arg, "=", 2)[0]
+ return strings.Contains(arg, shorthand)
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, alreadythere := f.formal[f.normalizeFlagName(name)]
+ if !alreadythere {
+ if name == "help" { // special case for nice help message.
+ f.usage()
+ return a, ErrHelp
+ }
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if len(flag.NoOptDefVal) > 0 {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+ err = f.setFlag(flag, value, s)
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, alreadythere := f.shorthands[c]
+ if !alreadythere {
+ if c == 'h' { // special case for nice help message.
+ f.usage()
+ err = ErrHelp
+ return
+ }
+ //TODO continue on error
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
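+	// Resolve the value in order of precedence: "-f=value", the flag's
+	// NoOptDefVal, the rest of the cluster ("-fvalue"), or the next argument ("-f value").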
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ value = shorthands[2:]
+ outShorts = ""
+ } else if len(flag.NoOptDefVal) > 0 {
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+ err = f.setFlag(flag, value, shorthands)
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
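+		// Anything empty, not starting with "-", or exactly "-" is kept as a plain argument.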
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args)
+ } else {
+ args, err = f.parseShortArg(s, args)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+ err := f.parseArgs(arguments)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// The default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/float32.go b/src/kube2msb/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..7683fae
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name.
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/float64.go b/src/kube2msb/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..50fbf8c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,87 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name.
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go b/src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..a8c24ef
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,97 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var _ = fmt.Print
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
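+// (for example, to adopt flags that were registered on the standard library's flag.CommandLine)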
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int.go b/src/kube2msb/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..b656036
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,87 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name.
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int32.go b/src/kube2msb/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..41659a9
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name.
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int64.go b/src/kube2msb/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..6e67e38
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,87 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name.
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int8.go b/src/kube2msb/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..400db21
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go b/src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..1e7c9ed
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
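
A short sketch of the intSlice flag above in use: each occurrence is split on commas, the first occurrence replaces the default, and later occurrences append (the flag name and default below are illustrative):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        ports := flag.IntSliceP("ports", "p", []int{80}, "ports to listen on")
        flag.Parse()
        // "--ports=8080,8443 --ports=9090" yields [8080 8443 9090].
        fmt.Println(*ports)
    }
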
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/ip.go b/src/kube2msb/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..88a1743
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,96 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+var _ = strings.TrimSpace
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
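
A hedged sketch of the IP flag above; net.ParseIP accepts IPv4 and IPv6 literals, and Set rejects anything that does not parse (the flag name and default are illustrative):

    package main

    import (
        "fmt"
        "net"

        flag "github.com/spf13/pflag"
    )

    func main() {
        bind := flag.IP("bind", net.ParseIP("127.0.0.1"), "address to bind to")
        flag.Parse()
        // "--bind=::1" and "--bind=10.0.0.7" both parse; "--bind=somehost" is rejected.
        fmt.Println(bind.String())
    }
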
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go b/src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
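
ParseIPv4Mask above accepts either the dotted-quad form or the eight-hex-digit form that net.IPMask.String() produces; a small illustrative check:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        // Both spellings produce the same 4-byte mask; String() prints the hex form.
        a := flag.ParseIPv4Mask("255.255.255.0")
        b := flag.ParseIPv4Mask("ffffff00")
        fmt.Println(a.String(), b.String()) // "ffffff00 ffffff00"
    }
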
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go b/src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..149b764
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,100 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+var _ = strings.TrimSpace
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
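
A minimal sketch of the IPNet flag above; the stored value is the network returned by net.ParseCIDR, so host bits are masked off (the flag name and default are illustrative):

    package main

    import (
        "fmt"
        "net"

        flag "github.com/spf13/pflag"
    )

    func main() {
        _, def, _ := net.ParseCIDR("10.0.0.0/8")
        allowed := flag.IPNet("allow-cidr", *def, "network allowed to connect")
        flag.Parse()
        // "--allow-cidr=192.168.1.17/24" stores 192.168.1.0/24.
        fmt.Println(allowed.String())
    }
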
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/string.go b/src/kube2msb/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..e296136
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,82 @@
+package pflag
+
+import "fmt"
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go b/src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..b53648b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,111 @@
+package pflag
+
+import (
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+var _ = fmt.Fprint
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ v, err := csvReader.Read()
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" }
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = strings.Trim(sval, "[]")
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ v := strings.Split(sval, ",")
+ return v, nil
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
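
Because the stringSlice value above parses each occurrence with encoding/csv, a quoted element can itself contain a comma; a short illustrative sketch (flag name and values are assumptions for the example):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        tags := flag.StringSliceP("tags", "t", nil, "tags to apply")
        flag.Parse()
        // --tags=a,b --tags='"c,d"'  =>  [a b c,d]
        // (the second occurrence appends, and the CSV-quoted field keeps its comma)
        fmt.Println(*tags)
    }
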
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..e142b49
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint16.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..5c96c19
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,89 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint32.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..294fcaa
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,89 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint64.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..c681885
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/spf13/pflag/uint8.go b/src/kube2msb/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..26db418
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,91 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/LICENSE b/src/kube2msb/vendor/github.com/ugorji/go/LICENSE
new file mode 100644
index 0000000..95a0f05
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2015 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go
new file mode 100644
index 0000000..bd7361c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/0doc.go
@@ -0,0 +1,199 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package understands the 'unsafe' tag, to allow using unsafe semantics:
+
+ - When decoding into a struct, you need to read the field name as a string
+ so you can find the struct field it is mapped to.
+ Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.
+
+To install using unsafe, pass the 'unsafe' tag:
+
+ go get -tags=unsafe github.com/ugorji/go/codec
+
+For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BisSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST not be used concurrently
+ - To re-use an Encoder/Decoder, call Reset(...) on it first.
+   This allows you to use state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+*/
+package codec
+
+// Benefits of go-codec:
+//
+// - encoding/json always reads whole file into memory first.
+// This makes it unsuitable for parsing very large files.
+// - encoding/xml cannot parse into a map[string]interface{}
+// I found this out on reading https://github.com/clbanning/mxj
+
+// TODO:
+//
+// - optimization for codecgen:
+// if len of entity is <= 3 words, then support a value receiver for encode.
+// - (En|De)coder should store an error when it occurs.
+// Until reset, subsequent calls return that error that was stored.
+// This means that free panics must go away.
+// All errors must be raised through errorf method.
+// - Decoding using a chan is good, but incurs concurrency costs.
+// This is because there's no fast way to use a channel without it
+// having to switch goroutines constantly.
+// Callback pattern is still the best. Maybe consider supporting something like:
+// type X struct {
+// Name string
+// Ys []Y
+// Ys chan <- Y
+// Ys func(Y) -> call this function for each entry
+// }
+// - Consider adding a isZeroer interface { isZero() bool }
+// It is used within isEmpty, for omitEmpty support.
+// - Consider making Handle used AS-IS within the encoding/decoding session.
+// This means that we don't cache Handle information within the (En|De)coder,
+// except we really need it at Reset(...)
+// - Consider adding math/big support
+// - Consider reducing the size of the generated functions:
+// Maybe use one loop, and put the conditionals in the loop.
+// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/README.md b/src/kube2msb/vendor/github.com/ugorji/go/codec/README.md
new file mode 100644
index 0000000..a790a52
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/README.md
@@ -0,0 +1,148 @@
+# Codec
+
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package understands the `unsafe` tag, to allow using unsafe semantics:
+
+ - When decoding into a struct, you need to read the field name as a string
+ so you can find the struct field it is mapped to.
+ Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion.
+
+To use it, you must pass the `unsafe` tag during install:
+
+```
+go install -tags=unsafe github.com/ugorji/go/codec
+```
+
+Online documentation: http://godoc.org/github.com/ugorji/go/codec
+Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Multiple conversions:
+ Package coerces types where appropriate
+ e.g. decode an int in the stream into a float, etc.
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding;
+   the user decides whether to return an error or skip data when keys or indexes
+   in the data stream do not map to fields in the struct.
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs, e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
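+
+As a brief sketch of the tag support above, field renaming and empty-field
+omission are driven by struct tags (`codec:`, with `json:` honored as a
+drop-in). The Person type here is illustrative only:
+
+    type Person struct {
+        Name    string `codec:"name"`
+        Age     int    `codec:"age,omitempty"`
+        private string // unexported fields are not encoded
+    }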
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BitSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
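+
+For illustration, here is a minimal sketch of a bytes-based extension for the
+UUID type above, registered on a BincHandle via SetBytesExt. It assumes the
+BytesExt interface (WriteExt/ReadExt); the uuidExt name and the tag value 78
+are arbitrary choices for this example, not part of the package:
+
+    type uuidExt struct{}
+
+    func (uuidExt) WriteExt(v interface{}) []byte {
+        switch u := v.(type) {
+        case UUID:
+            return []byte(u)
+        case *UUID:
+            return []byte(*u)
+        }
+        return nil
+    }
+
+    func (uuidExt) ReadExt(dst interface{}, src []byte) {
+        *(dst.(*UUID)) = UUID(src)
+    }
+
+    var bh codec.BincHandle
+    err := bh.SetBytesExt(reflect.TypeOf(UUID("")), 78, uuidExt{})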
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+## Usage
+
+Typical usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
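+
+    // Illustration only: the client is then used like any net/rpc client;
+    // the service method name, args and reply here are hypothetical.
+    err = client.Call("TimeService.Now", args, &reply)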
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go
new file mode 100644
index 0000000..766d26c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/binc.go
@@ -0,0 +1,922 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+const bincDoPrune = true // No longer needed; it was only required when the C lib did not support pruning.
+
+// vd as low 4 bits (there are 16 slots)
+const (
+ bincVdSpecial byte = iota
+ bincVdPosInt
+ bincVdNegInt
+ bincVdFloat
+
+ bincVdString
+ bincVdByteArray
+ bincVdArray
+ bincVdMap
+
+ bincVdTimestamp
+ bincVdSmallInt
+ bincVdUnicodeOther
+ bincVdSymbol
+
+ bincVdDecimal
+ _ // open slot
+ _ // open slot
+ bincVdCustomExt = 0x0f
+)
+
+const (
+ bincSpNil byte = iota
+ bincSpFalse
+ bincSpTrue
+ bincSpNan
+ bincSpPosInf
+ bincSpNegInf
+ bincSpZeroFloat
+ bincSpZero
+ bincSpNegOne
+)
+
+const (
+ bincFlBin16 byte = iota
+ bincFlBin32
+ _ // bincFlBin32e
+ bincFlBin64
+ _ // bincFlBin64e
+ // others not currently supported
+)
+
+type bincEncDriver struct {
+ e *Encoder
+ w encWriter
+ m map[string]uint16 // symbols
+ b [scratchByteArrayLen]byte
+ s uint16 // symbols sequencer
+ encNoSeparator
+}
+
+func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
+ if rt == timeTypId {
+ var bs []byte
+ switch x := v.(type) {
+ case time.Time:
+ bs = encodeTime(x)
+ case *time.Time:
+ bs = encodeTime(*x)
+ default:
+ e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
+ }
+ e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+ e.w.writeb(bs)
+ }
+}
+
+func (e *bincEncDriver) EncodeNil() {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNil)
+}
+
+func (e *bincEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
+ } else {
+ e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
+ }
+}
+
+func (e *bincEncDriver) EncodeFloat32(f float32) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *bincEncDriver) EncodeFloat64(f float64) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ bigen.PutUint64(e.b[:8], math.Float64bits(f))
+ if bincDoPrune {
+ i := 7
+ for ; i >= 0 && (e.b[i] == 0); i-- {
+ }
+ i++
+ if i <= 6 {
+ e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+ e.w.writen1(byte(i))
+ e.w.writeb(e.b[:i])
+ return
+ }
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin64)
+ e.w.writeb(e.b[:8])
+}
+
+func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
+ if lim == 4 {
+ bigen.PutUint32(e.b[:lim], uint32(v))
+ } else {
+ bigen.PutUint64(e.b[:lim], v)
+ }
+ if bincDoPrune {
+ i := pruneSignExt(e.b[:lim], pos)
+ e.w.writen1(bd | lim - 1 - byte(i))
+ e.w.writeb(e.b[i:lim])
+ } else {
+ e.w.writen1(bd | lim - 1)
+ e.w.writeb(e.b[:lim])
+ }
+}
+
+func (e *bincEncDriver) EncodeInt(v int64) {
+ const nbd byte = bincVdNegInt << 4
+ if v >= 0 {
+ e.encUint(bincVdPosInt<<4, true, uint64(v))
+ } else if v == -1 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
+ } else {
+ e.encUint(bincVdNegInt<<4, false, uint64(-v))
+ }
+}
+
+func (e *bincEncDriver) EncodeUint(v uint64) {
+ e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+ if v == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZero)
+ } else if pos && v >= 1 && v <= 16 {
+ e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd|0x0, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.encIntegerPrune(bd, pos, v, 4)
+ } else {
+ e.encIntegerPrune(bd, pos, v, 8)
+ }
+}
+
+func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(bincVdCustomExt<<4, uint64(length))
+ e.w.writen1(xtag)
+}
+
+func (e *bincEncDriver) EncodeArrayStart(length int) {
+ e.encLen(bincVdArray<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeMapStart(length int) {
+ e.encLen(bincVdMap<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeSymbol(v string) {
+ // if WriteSymbolsNoRefs {
+ // e.encodeString(c_UTF8, v)
+ // return
+ // }
+
+ //symbols only offer benefit when string length > 1.
+ //This is because strings with length 1 take only 2 bytes to store
+ //(bd with embedded length, and single byte for string val).
+
+ l := len(v)
+ if l == 0 {
+ e.encBytesLen(c_UTF8, 0)
+ return
+ } else if l == 1 {
+ e.encBytesLen(c_UTF8, 1)
+ e.w.writen1(v[0])
+ return
+ }
+ if e.m == nil {
+ e.m = make(map[string]uint16, 16)
+ }
+ ui, ok := e.m[v]
+ if ok {
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ } else {
+ e.s++
+ ui = e.s
+ //ui = uint16(atomic.AddUint32(&e.s, 1))
+ e.m[v] = ui
+ var lenprec uint8
+ if l <= math.MaxUint8 {
+ // lenprec = 0
+ } else if l <= math.MaxUint16 {
+ lenprec = 1
+ } else if int64(l) <= math.MaxUint32 {
+ lenprec = 2
+ } else {
+ lenprec = 3
+ }
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ if lenprec == 0 {
+ e.w.writen1(byte(l))
+ } else if lenprec == 1 {
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
+ } else if lenprec == 2 {
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
+ } else {
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
+ }
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writeb(v)
+ }
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+ //TODO: support bincUnicodeOther (for now, just use string or bytearray)
+ if c == c_RAW {
+ e.encLen(bincVdByteArray<<4, length)
+ } else {
+ e.encLen(bincVdString<<4, length)
+ }
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+ if l < 12 {
+ e.w.writen1(bd | uint8(l+4))
+ } else {
+ e.encLenNumber(bd, l)
+ }
+}
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd | 0x02)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else {
+ e.w.writen1(bd | 0x03)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
+ }
+}
+
+//------------------------------------
+
+type bincDecSymbol struct {
+ s string
+ b []byte
+ i uint16
+}
+
+type bincDecDriver struct {
+ d *Decoder
+ h *BincHandle
+ r decReader
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ vd byte
+ vs byte
+ noStreamingCodec
+ decNoSeparator
+ b [scratchByteArrayLen]byte
+
+ // linear searching on this slice is ok,
+ // because we typically expect < 32 symbols in each stream.
+ s []bincDecSymbol
+}
+
+func (d *bincDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.vd = d.bd >> 4
+ d.vs = d.bd & 0x0f
+ d.bdRead = true
+}
+
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+ if d.vd == bincVdSpecial && d.vs == bincSpNil {
+ return valueTypeNil
+ } else if d.vd == bincVdByteArray {
+ return valueTypeBytes
+ } else if d.vd == bincVdString {
+ return valueTypeString
+ } else if d.vd == bincVdArray {
+ return valueTypeArray
+ } else if d.vd == bincVdMap {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *bincDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if rt == timeTypId {
+ if d.vd != bincVdTimestamp {
+ d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
+ return
+ }
+ tt, err := decodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ var vt *time.Time = v.(*time.Time)
+ *vt = tt
+ d.bdRead = false
+ }
+}
+
+func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
+ if vs&0x8 == 0 {
+ d.r.readb(d.b[0:defaultLen])
+ } else {
+ l := d.r.readn1()
+ if l > 8 {
+ d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
+ return
+ }
+ for i := l; i < 8; i++ {
+ d.b[i] = 0
+ }
+ d.r.readb(d.b[0:l])
+ }
+}
+
+func (d *bincDecDriver) decFloat() (f float64) {
+ //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
+ if x := d.vs & 0x7; x == bincFlBin32 {
+ d.decFloatPre(d.vs, 4)
+ f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
+ } else if x == bincFlBin64 {
+ d.decFloatPre(d.vs, 8)
+ f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
+ } else {
+ d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+ // need to inline the code (interface conversion and type assertion expensive)
+ switch d.vs {
+ case 0:
+ v = uint64(d.r.readn1())
+ case 1:
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ case 2:
+ d.b[4] = 0
+ d.r.readb(d.b[5:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 3:
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 4, 5, 6:
+ lim := int(7 - d.vs)
+ d.r.readb(d.b[lim:8])
+ for i := 0; i < lim; i++ {
+ d.b[i] = 0
+ }
+ v = uint64(bigen.Uint64(d.b[:8]))
+ case 7:
+ d.r.readb(d.b[:8])
+ v = uint64(bigen.Uint64(d.b[:8]))
+ default:
+ d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdPosInt {
+ ui = d.decUint()
+ } else if vd == bincVdNegInt {
+ ui = d.decUint()
+ neg = true
+ } else if vd == bincVdSmallInt {
+ ui = uint64(d.vs) + 1
+ } else if vd == bincVdSpecial {
+ if vs == bincSpZero {
+ //i = 0
+ } else if vs == bincSpNegOne {
+ neg = true
+ ui = 1
+ } else {
+ d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
+ return
+ }
+ } else {
+ d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ ui, neg := d.decCheckInteger()
+ i, overflow := chkOvf.SignedInt(ui)
+ if overflow {
+ d.d.errorf("simple: overflow converting %v to signed integer", ui)
+ return
+ }
+ if neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("binc: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("binc: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdSpecial {
+ d.bdRead = false
+ if vs == bincSpNan {
+ return math.NaN()
+ } else if vs == bincSpPosInf {
+ return math.Inf(1)
+ } else if vs == bincSpZeroFloat || vs == bincSpZero {
+ return
+ } else if vs == bincSpNegInf {
+ return math.Inf(-1)
+ } else {
+ d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
+ return
+ }
+ } else if vd == bincVdFloat {
+ f = d.decFloat()
+ } else {
+ f = float64(d.DecodeInt(64))
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("binc: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
+ // b = false
+ } else if bd == (bincVdSpecial | bincSpTrue) {
+ b = true
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadMapStart() (length int) {
+ if d.vd != bincVdMap {
+ d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadArrayStart() (length int) {
+ if d.vd != bincVdArray {
+ d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decLen() int {
+ if d.vs > 3 {
+ return int(d.vs - 4)
+ }
+ return int(d.decLenNumber())
+}
+
+func (d *bincDecDriver) decLenNumber() (v uint64) {
+ if x := d.vs; x == 0 {
+ v = uint64(d.r.readn1())
+ } else if x == 1 {
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ } else if x == 2 {
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ } else {
+ d.r.readb(d.b[:8])
+ v = bigen.Uint64(d.b[:8])
+ }
+ return
+}
+
+func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return
+ }
+ var slen int = -1
+ // var ok bool
+ switch d.vd {
+ case bincVdString, bincVdByteArray:
+ slen = d.decLen()
+ if zerocopy {
+ if d.br {
+ bs2 = d.r.readx(slen)
+ } else if len(bs) == 0 {
+ bs2 = decByteSlice(d.r, slen, d.b[:])
+ } else {
+ bs2 = decByteSlice(d.r, slen, bs)
+ }
+ } else {
+ bs2 = decByteSlice(d.r, slen, bs)
+ }
+ if withString {
+ s = string(bs2)
+ }
+ case bincVdSymbol:
+ // zerocopy doesn't apply for symbols,
+ // as the values must be stored in a table for later use.
+ //
+ //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
+ //extract symbol
+ //if containsStringVal, read it and put in map
+ //else look in map for string value
+ var symbol uint16
+ vs := d.vs
+ if vs&0x8 == 0 {
+ symbol = uint16(d.r.readn1())
+ } else {
+ symbol = uint16(bigen.Uint16(d.r.readx(2)))
+ }
+ if d.s == nil {
+ d.s = make([]bincDecSymbol, 0, 16)
+ }
+
+ if vs&0x4 == 0 {
+ for i := range d.s {
+ j := &d.s[i]
+ if j.i == symbol {
+ bs2 = j.b
+ if withString {
+ if j.s == "" && bs2 != nil {
+ j.s = string(bs2)
+ }
+ s = j.s
+ }
+ break
+ }
+ }
+ } else {
+ switch vs & 0x3 {
+ case 0:
+ slen = int(d.r.readn1())
+ case 1:
+ slen = int(bigen.Uint16(d.r.readx(2)))
+ case 2:
+ slen = int(bigen.Uint32(d.r.readx(4)))
+ case 3:
+ slen = int(bigen.Uint64(d.r.readx(8)))
+ }
+ // since using symbols, do not store any part of
+ // the parameter bs in the map, as it might be a shared buffer.
+ // bs2 = decByteSlice(d.r, slen, bs)
+ bs2 = decByteSlice(d.r, slen, nil)
+ if withString {
+ s = string(bs2)
+ }
+ d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
+ }
+ default:
+ d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeString() (s string) {
+ // DecodeBytes does not accommodate symbols, whose impl stores string version in map.
+ // Use decStringAndBytes directly.
+ // return string(d.DecodeBytes(d.b[:], true, true))
+ _, s = d.decStringAndBytes(d.b[:], true, true)
+ return
+}
+
+func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if isstring {
+ bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
+ return
+ }
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return nil
+ }
+ var clen int
+ if d.vd == bincVdString || d.vd == bincVdByteArray {
+ clen = d.decLen()
+ } else {
+ d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, d.vd)
+ return
+ }
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd == bincVdCustomExt {
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ } else if d.vd == bincVdByteArray {
+ xbs = d.DecodeBytes(nil, false, true)
+ } else {
+ d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.vd {
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpNil:
+ n.v = valueTypeNil
+ case bincSpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case bincSpTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case bincSpNan:
+ n.v = valueTypeFloat
+ n.f = math.NaN()
+ case bincSpPosInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(1)
+ case bincSpNegInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(-1)
+ case bincSpZeroFloat:
+ n.v = valueTypeFloat
+ n.f = float64(0)
+ case bincSpZero:
+ n.v = valueTypeUint
+ n.u = uint64(0) // int8(0)
+ case bincSpNegOne:
+ n.v = valueTypeInt
+ n.i = int64(-1) // int8(-1)
+ default:
+ d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
+ }
+ case bincVdSmallInt:
+ n.v = valueTypeUint
+ n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+ case bincVdPosInt:
+ n.v = valueTypeUint
+ n.u = d.decUint()
+ case bincVdNegInt:
+ n.v = valueTypeInt
+ n.i = -(int64(d.decUint()))
+ case bincVdFloat:
+ n.v = valueTypeFloat
+ n.f = d.decFloat()
+ case bincVdSymbol:
+ n.v = valueTypeSymbol
+ n.s = d.DecodeString()
+ case bincVdString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case bincVdByteArray:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case bincVdTimestamp:
+ n.v = valueTypeTimestamp
+ tt, err := decodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ n.t = tt
+ case bincVdCustomExt:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case bincVdArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bincVdMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+//------------------------------------
+
+//BincHandle is a Handle for the Binc Schema-Free Encoding Format
+//defined at https://github.com/ugorji/binc .
+//
+//BincHandle currently supports all Binc features with the following EXCEPTIONS:
+// - only integers up to 64 bits of precision are supported.
+// big integers are unsupported.
+// - Only IEEE 754 binary32 and binary64 floats are supported (i.e. Go float32 and float64 types).
+// extended precision and decimal IEEE 754 floats are unsupported.
+// - Only UTF-8 strings supported.
+// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
+type BincHandle struct {
+ BasicHandle
+ binaryEncodingType
+}
+
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
+ return &bincEncDriver{e: e, w: e.w}
+}
+
+func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
+ return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *bincEncDriver) reset() {
+ e.w = e.e.w
+ e.s = 0
+ e.m = nil
+}
+
+func (d *bincDecDriver) reset() {
+ d.r = d.d.r
+ d.s = nil
+ d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
+}
+
+var _ decDriver = (*bincDecDriver)(nil)
+var _ encDriver = (*bincEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go
new file mode 100644
index 0000000..a224cd3
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/cbor.go
@@ -0,0 +1,585 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+)
+
+const (
+ cborMajorUint byte = iota
+ cborMajorNegInt
+ cborMajorBytes
+ cborMajorText
+ cborMajorArray
+ cborMajorMap
+ cborMajorTag
+ cborMajorOther
+)
+
+const (
+ cborBdFalse byte = 0xf4 + iota
+ cborBdTrue
+ cborBdNil
+ cborBdUndefined
+ cborBdExt
+ cborBdFloat16
+ cborBdFloat32
+ cborBdFloat64
+)
+
+const (
+ cborBdIndefiniteBytes byte = 0x5f
+ cborBdIndefiniteString = 0x7f
+ cborBdIndefiniteArray = 0x9f
+ cborBdIndefiniteMap = 0xbf
+ cborBdBreak = 0xff
+)
+
+const (
+ CborStreamBytes byte = 0x5f
+ CborStreamString = 0x7f
+ CborStreamArray = 0x9f
+ CborStreamMap = 0xbf
+ CborStreamBreak = 0xff
+)
+
+const (
+ cborBaseUint byte = 0x00
+ cborBaseNegInt = 0x20
+ cborBaseBytes = 0x40
+ cborBaseString = 0x60
+ cborBaseArray = 0x80
+ cborBaseMap = 0xa0
+ cborBaseTag = 0xc0
+ cborBaseSimple = 0xe0
+)
+
+// -------------------
+
+type cborEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ w encWriter
+ h *CborHandle
+ x [8]byte
+}
+
+func (e *cborEncDriver) EncodeNil() {
+ e.w.writen1(cborBdNil)
+}
+
+func (e *cborEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(cborBdTrue)
+ } else {
+ e.w.writen1(cborBdFalse)
+ }
+}
+
+func (e *cborEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(cborBdFloat32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *cborEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(cborBdFloat64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *cborEncDriver) encUint(v uint64, bd byte) {
+ if v <= 0x17 {
+ e.w.writen1(byte(v) + bd)
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd+0x18, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 0x19)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 0x1a)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 0x1b)
+ bigenHelper{e.x[:8], e.w}.writeUint64(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-1-v), cborBaseNegInt)
+ } else {
+ e.encUint(uint64(v), cborBaseUint)
+ }
+}
+
+func (e *cborEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, cborBaseUint)
+}
+
+func (e *cborEncDriver) encLen(bd byte, length int) {
+ e.encUint(uint64(length), bd)
+}
+
+func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ e.encUint(uint64(xtag), cborBaseTag)
+ if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ e.encUint(uint64(re.Tag), cborBaseTag)
+ if re.Data != nil {
+ en.encode(re.Data)
+ } else if re.Value == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *cborEncDriver) EncodeArrayStart(length int) {
+ e.encLen(cborBaseArray, length)
+}
+
+func (e *cborEncDriver) EncodeMapStart(length int) {
+ e.encLen(cborBaseMap, length)
+}
+
+func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
+ e.encLen(cborBaseString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *cborEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ if c == c_RAW {
+ e.encLen(cborBaseBytes, len(v))
+ } else {
+ e.encLen(cborBaseString, len(v))
+ }
+ e.w.writeb(v)
+}
+
+// ----------------------
+
+type cborDecDriver struct {
+ d *Decoder
+ h *CborHandle
+ r decReader
+ b [scratchByteArrayLen]byte
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ noBuiltInTypes
+ decNoSeparator
+}
+
+func (d *cborDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+ if d.bd == cborBdNil {
+ return valueTypeNil
+ } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
+ return valueTypeBytes
+ } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
+ return valueTypeString
+ } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+ return valueTypeArray
+ } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *cborDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ // treat Nil and Undefined as nil values
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) CheckBreak() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdBreak {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) decUint() (ui uint64) {
+ v := d.bd & 0x1f
+ if v <= 0x17 {
+ ui = uint64(v)
+ } else {
+ if v == 0x18 {
+ ui = uint64(d.r.readn1())
+ } else if v == 0x19 {
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ } else if v == 0x1a {
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ } else if v == 0x1b {
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ } else {
+ d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ return
+}
+
+func (d *cborDecDriver) decCheckInteger() (neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ major := d.bd >> 5
+ if major == cborMajorUint {
+ } else if major == cborMajorNegInt {
+ neg = true
+ } else {
+ d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
+ return
+ }
+ return
+}
+
+func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ neg := d.decCheckInteger()
+ ui := d.decUint()
+ // check if this number can be converted to an int without overflow
+ var overflow bool
+ if neg {
+ if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
+ d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
+ return
+ }
+ i = -i
+ } else {
+ if i, overflow = chkOvf.SignedInt(ui); overflow {
+ d.d.errorf("cbor: overflow converting %v to signed integer", ui)
+ return
+ }
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("cbor: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ if d.decCheckInteger() {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ ui = d.decUint()
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("cbor: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdFloat16 {
+ f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
+ } else if bd == cborBdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if bd == cborBdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else if bd >= cborBaseUint && bd < cborBaseBytes {
+ f = float64(d.DecodeInt(64))
+ } else {
+ d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
+ return
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("cbor: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *cborDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdTrue {
+ b = true
+ } else if bd == cborBdFalse {
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) ReadMapStart() (length int) {
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteMap {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) ReadArrayStart() (length int) {
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteArray {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) decLen() int {
+ return int(d.decUint())
+}
+
+func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
+ d.bdRead = false
+ for {
+ if d.CheckBreak() {
+ break
+ }
+ if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
+ d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
+ return nil
+ }
+ n := d.decLen()
+ oldLen := len(bs)
+ newLen := oldLen + n
+ if newLen > cap(bs) {
+ bs2 := make([]byte, newLen, 2*cap(bs)+n)
+ copy(bs2, bs)
+ bs = bs2
+ } else {
+ bs = bs[:newLen]
+ }
+ d.r.readb(bs[oldLen:newLen])
+ // bs = append(bs, d.r.readn()...)
+ d.bdRead = false
+ }
+ d.bdRead = false
+ return bs
+}
+
+func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return nil
+ }
+ if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+ if bs == nil {
+ return d.decAppendIndefiniteBytes(nil)
+ }
+ return d.decAppendIndefiniteBytes(bs[:0])
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *cborDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ u := d.decUint()
+ d.bdRead = false
+ realxtag = u
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ d.d.decode(&re.Value)
+ } else if xtag != realxtag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
+ return
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case cborBdNil:
+ n.v = valueTypeNil
+ case cborBdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case cborBdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case cborBdFloat16, cborBdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
+ case cborBdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
+ case cborBdIndefiniteBytes:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case cborBdIndefiniteString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case cborBdIndefiniteArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case cborBdIndefiniteMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ switch {
+ case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
+ }
+ case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ case d.bd >= cborBaseBytes && d.bd < cborBaseString:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case d.bd >= cborBaseString && d.bd < cborBaseArray:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case d.bd >= cborBaseArray && d.bd < cborBaseMap:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case d.bd >= cborBaseMap && d.bd < cborBaseTag:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
+ n.v = valueTypeExt
+ n.u = d.decUint()
+ n.l = nil
+ // d.bdRead = false
+ // d.d.decode(&re.Value) // handled by decode itself.
+ // decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ return
+ }
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+// - indefinite-length arrays/maps/bytes/strings
+// - (extension) tags in range 0..0xffff (0 .. 65535)
+// - half, single and double-precision floats
+// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+// - nil, true, false, ...
+// - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+//
+// To encode with indefinite lengths (streaming), users will use
+// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
+//
+// For example, to encode "one-byte" as an indefinite length string:
+// var buf bytes.Buffer
+// e := NewEncoder(&buf, new(CborHandle))
+// buf.WriteByte(CborStreamString)
+// e.MustEncode("one-")
+// e.MustEncode("byte")
+// buf.WriteByte(CborStreamBreak)
+// encodedBytes := buf.Bytes()
+// var vv interface{}
+// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
+// // Now, vv contains the same string "one-byte"
+//
+type CborHandle struct {
+ binaryEncodingType
+ BasicHandle
+}
+
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
+
+func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
+ return &cborEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
+ return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *cborEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 0000000..7e56f1e
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2019 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "time"
+)
+
+// Some tagging information for error messages.
+const (
+ msgBadDesc = "Unrecognized descriptor byte"
+ msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+var (
+ onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct")
+ cannotDecodeIntoNilErr = errors.New("cannot decode into nil")
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+ unreadn1()
+
+ // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
+ // just return a view of the []byte being decoded from.
+ // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
+ readx(n int) []byte
+ readb([]byte)
+ readn1() uint8
+ readn1eof() (v uint8, eof bool)
+ numread() int // number of bytes read
+ track()
+ stopTrack() []byte
+}
+
+type decReaderByteScanner interface {
+ io.Reader
+ io.ByteScanner
+}
+
+type decDriver interface {
+ // this will check if the next token is a break.
+ CheckBreak() bool
+ TryDecodeAsNil() bool
+ // vt is one of: Bytes, String, Nil, Slice or Map. Return valueTypeUnset if not known.
+ ContainerType() (vt valueType)
+ IsBuiltinType(rt uintptr) bool
+ DecodeBuiltin(rt uintptr, v interface{})
+
+ // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+ // For maps and arrays, it will not do the decoding in-band, but will signal
+ // the decoder, so that is done later, by setting the decNaked.valueType field.
+ //
+ // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ // for extensions, DecodeNaked must read the tag and the []byte if it exists.
+ // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+ // that stores the subsequent value in-band, and complete reading the RawExt.
+ //
+ // extensions should also use readx to decode them, for efficiency.
+ // kInterface will extract the detached byte slice if it has to pass it outside its realm.
+ DecodeNaked()
+ DecodeInt(bitsize uint8) (i int64)
+ DecodeUint(bitsize uint8) (ui uint64)
+ DecodeFloat(chkOverflow32 bool) (f float64)
+ DecodeBool() (b bool)
+ // DecodeString can also decode symbols.
+ // It looks redundant as DecodeBytes is available.
+ // However, some codecs (e.g. binc) support symbols and can
+ // return a pre-stored string value, meaning that it can bypass
+ // the cost of []byte->string conversion.
+ DecodeString() (s string)
+
+ // DecodeBytes may be called directly, without going through reflection.
+ // Consequently, it must be designed to handle possible nil.
+ DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+ // decodeExt will decode into a *RawExt or into an extension.
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+ // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+ ReadMapStart() int
+ ReadArrayStart() int
+
+ reset()
+ uncacheRead()
+}
+
+type decNoSeparator struct{}
+
+func (_ decNoSeparator) ReadEnd() {}
+func (_ decNoSeparator) uncacheRead() {}
+
+type DecodeOptions struct {
+ // MapType specifies type to use during schema-less decoding of a map in the stream.
+ // If nil, we use map[interface{}]interface{}
+ MapType reflect.Type
+
+ // SliceType specifies type to use during schema-less decoding of an array in the stream.
+ // If nil, we use []interface{}
+ SliceType reflect.Type
+
+ // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with.
+ // If 0 or negative, we default to a sensible value based on the size of an element in the collection.
+ //
+ // For example, when decoding, a stream may say that it has MAX_UINT elements.
+ // We should not automatically provision a slice of that length, to prevent an out-of-memory crash.
+ // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+ MaxInitLen int
+
+ // If ErrorIfNoField, return an error when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+
+ // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+ // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+ ErrorIfNoArrayExpand bool
+
+ // If SignedInteger, decode unsigned values as int64 (not uint64) during schema-less decoding.
+ SignedInteger bool
+
+ // MapValueReset controls how we decode into a map value.
+ //
+ // By default, we MAY retrieve the mapping for a key, and then decode into that.
+ // However, especially with big maps, that retrieval may be expensive and unnecessary
+ // if the stream already contains all that is necessary to recreate the value.
+ //
+ // If true, we will never retrieve the previous mapping,
+ // but rather decode into a new value and set that in the map.
+ //
+ // If false, we will retrieve the previous mapping if necessary e.g.
+ // the previous mapping is a pointer, or is a struct or array with pre-set state,
+ // or is an interface.
+ MapValueReset bool
+
+ // InterfaceReset controls how we decode into an interface.
+ //
+ // By default, when we see a field that is an interface{...},
+ // or a map with interface{...} value, we will attempt decoding into the
+ // "contained" value.
+ //
+ // However, this prevents us from reading a string into an interface{}
+ // that formerly contained a number.
+ //
+ // If true, we will decode into a new "blank" value, and set that in the interface.
+ // If false, we will decode into whatever is contained in the interface.
+ InterfaceReset bool
+
+ // InternString controls interning of strings during decoding.
+ //
+ // Some handles, e.g. json, typically will read map keys as strings.
+ // If the set of keys is finite, it may help reduce allocation to
+ // look them up from a map (rather than allocating them afresh).
+ //
+ // Note: Handles will be smart when using the intern functionality.
+ // So not everything will be interned.
+ InternString bool
+}
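+
+// Illustrative sketch (assumption: DecodeOptions is embedded via BasicHandle,
+// consistent with README usage such as `mh.MapType = ...`). These fields are
+// set directly on a concrete Handle before constructing a Decoder, e.g.
+//
+//     var h CborHandle
+//     h.MapType = reflect.TypeOf(map[string]interface{}(nil))
+//     h.ErrorIfNoField = true
+//     d := NewDecoder(r, &h) // r is an io.Reader supplied by the caller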
+
+// ------------------------------------
+
+// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods
+// of io.Reader, io.ByteScanner.
+type ioDecByteScanner struct {
+ r io.Reader
+ l byte // last byte
+ ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread
+ b [1]byte // tiny buffer for reading single bytes
+}
+
+func (z *ioDecByteScanner) Read(p []byte) (n int, err error) {
+ var firstByte bool
+ if z.ls == 1 {
+ z.ls = 2
+ p[0] = z.l
+ if len(p) == 1 {
+ n = 1
+ return
+ }
+ firstByte = true
+ p = p[1:]
+ }
+ n, err = z.r.Read(p)
+ if n > 0 {
+ if err == io.EOF && n == len(p) {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ z.l = p[n-1]
+ z.ls = 2
+ }
+ if firstByte {
+ n++
+ }
+ return
+}
+
+func (z *ioDecByteScanner) ReadByte() (c byte, err error) {
+ n, err := z.Read(z.b[:])
+ if n == 1 {
+ c = z.b[0]
+ if err == io.EOF {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ }
+ return
+}
+
+func (z *ioDecByteScanner) UnreadByte() (err error) {
+ x := z.ls
+ if x == 0 {
+ err = errors.New("cannot unread - nothing has been read")
+ } else if x == 1 {
+ err = errors.New("cannot unread - last byte has not been read")
+ } else if x == 2 {
+ z.ls = 1
+ }
+ return
+}
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+ br decReaderByteScanner
+ // temp byte array re-used internally for efficiency during read.
+ // shares buffer with Decoder, so we keep size of struct within 8 words.
+ x *[scratchByteArrayLen]byte
+ bs ioDecByteScanner
+ n int // num read
+ tr []byte // tracking bytes read
+ trb bool
+}
+
+func (z *ioDecReader) numread() int {
+ return z.n
+}
+
+func (z *ioDecReader) readx(n int) (bs []byte) {
+ if n <= 0 {
+ return
+ }
+ if n < len(z.x) {
+ bs = z.x[:n]
+ } else {
+ bs = make([]byte, n)
+ }
+ if _, err := io.ReadAtLeast(z.br, bs, n); err != nil {
+ panic(err)
+ }
+ z.n += len(bs)
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+ return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := io.ReadAtLeast(z.br, bs, len(bs))
+ z.n += n
+ if err != nil {
+ panic(err)
+ }
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+}
+
+func (z *ioDecReader) readn1() (b uint8) {
+ b, err := z.br.ReadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ return b
+}
+
+func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
+ b, err := z.br.ReadByte()
+ if err == nil {
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ } else if err == io.EOF {
+ eof = true
+ } else {
+ panic(err)
+ }
+ return
+}
+
+func (z *ioDecReader) unreadn1() {
+ err := z.br.UnreadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n--
+ if z.trb {
+ if l := len(z.tr) - 1; l >= 0 {
+ z.tr = z.tr[:l]
+ }
+ }
+}
+
+func (z *ioDecReader) track() {
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+ z.trb = true
+}
+
+func (z *ioDecReader) stopTrack() (bs []byte) {
+ z.trb = false
+ return z.tr
+}
+
+// ------------------------------------
+
+var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read")
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+ b []byte // data
+ c int // cursor
+ a int // available
+ t int // track start
+}
+
+func (z *bytesDecReader) reset(in []byte) {
+ z.b = in
+ z.a = len(in)
+ z.c = 0
+ z.t = 0
+}
+
+func (z *bytesDecReader) numread() int {
+ return z.c
+}
+
+func (z *bytesDecReader) unreadn1() {
+ if z.c == 0 || len(z.b) == 0 {
+ panic(bytesDecReaderCannotUnreadErr)
+ }
+ z.c--
+ z.a++
+ return
+}
+
+func (z *bytesDecReader) readx(n int) (bs []byte) {
+ // slicing from a non-constant start position is more expensive,
+ // as more computation is required to decipher the pointer start position.
+ // However, we do it only once, and it's better than reslicing both z.b and return value.
+
+ if n <= 0 {
+ } else if z.a == 0 {
+ panic(io.EOF)
+ } else if n > z.a {
+ panic(io.ErrUnexpectedEOF)
+ } else {
+ c0 := z.c
+ z.c = c0 + n
+ z.a = z.a - n
+ bs = z.b[c0:z.c]
+ }
+ return
+}
+
+func (z *bytesDecReader) readn1() (v uint8) {
+ if z.a == 0 {
+ panic(io.EOF)
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
+ if z.a == 0 {
+ eof = true
+ return
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+ copy(bs, z.readx(len(bs)))
+}
+
+func (z *bytesDecReader) track() {
+ z.t = z.c
+}
+
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+ return z.b[z.t:z.c]
+}
+
+// ------------------------------------
+
+type decFnInfo struct {
+ d *Decoder
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+}
+
+// ----------------------------------------
+
+type decFn struct {
+ i decFnInfo
+ f func(*decFnInfo, reflect.Value)
+}
+
+func (f *decFnInfo) builtin(rv reflect.Value) {
+ f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface())
+}
+
+func (f *decFnInfo) rawExt(rv reflect.Value) {
+ f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil)
+}
+
+func (f *decFnInfo) ext(rv reflect.Value) {
+ f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn)
+}
+
+func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) {
+ if indir == -1 {
+ v = rv.Addr().Interface()
+ } else if indir == 0 {
+ v = rv.Interface()
+ } else {
+ for j := int8(0); j < indir; j++ {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ v = rv.Interface()
+ }
+ return
+}
+
+func (f *decFnInfo) selferUnmarshal(rv reflect.Value) {
+ f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d)
+}
+
+func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) {
+ bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler)
+ xbs := f.d.d.DecodeBytes(nil, false, true)
+ if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) textUnmarshal(rv reflect.Value) {
+ tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler)
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) {
+ tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler)
+ // bs := f.d.d.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) kErr(rv reflect.Value) {
+ f.d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+func (f *decFnInfo) kString(rv reflect.Value) {
+ rv.SetString(f.d.d.DecodeString())
+}
+
+func (f *decFnInfo) kBool(rv reflect.Value) {
+ rv.SetBool(f.d.d.DecodeBool())
+}
+
+func (f *decFnInfo) kInt(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(intBitsize))
+}
+
+func (f *decFnInfo) kInt64(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(64))
+}
+
+func (f *decFnInfo) kInt32(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(32))
+}
+
+func (f *decFnInfo) kInt8(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(8))
+}
+
+func (f *decFnInfo) kInt16(rv reflect.Value) {
+ rv.SetInt(f.d.d.DecodeInt(16))
+}
+
+func (f *decFnInfo) kFloat32(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(true))
+}
+
+func (f *decFnInfo) kFloat64(rv reflect.Value) {
+ rv.SetFloat(f.d.d.DecodeFloat(false))
+}
+
+func (f *decFnInfo) kUint8(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(8))
+}
+
+func (f *decFnInfo) kUint64(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(64))
+}
+
+func (f *decFnInfo) kUint(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUintptr(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUint32(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(32))
+}
+
+func (f *decFnInfo) kUint16(rv reflect.Value) {
+ rv.SetUint(f.d.d.DecodeUint(16))
+}
+
+// func (f *decFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// rv.Set(reflect.New(rv.Type().Elem()))
+// }
+// f.d.decodeValue(rv.Elem())
+// }
+
+// var kIntfCtr uint64
+
+func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
+ // nil interface:
+ // use some heuristics to decode it appropriately
+ // based on the detected next value in the stream.
+ d := f.d
+ d.d.DecodeNaked()
+ n := &d.n
+ if n.v == valueTypeNil {
+ return
+ }
+ // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+ // if num := f.ti.rt.NumMethod(); num > 0 {
+ if f.ti.numMeth > 0 {
+ d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+ return
+ }
+ // var useRvn bool
+ switch n.v {
+ case valueTypeMap:
+ // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp {
+ // } else if d.h.MapType == mapStrIntfTyp { // for json performance
+ // }
+ if d.mtid == 0 || d.mtid == mapIntfIntfTypId {
+ l := len(n.ms)
+ n.ms = append(n.ms, nil)
+ var v2 interface{} = &n.ms[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ms = n.ms[:l]
+ } else if d.mtid == mapStrIntfTypId { // for json performance
+ l := len(n.ns)
+ n.ns = append(n.ns, nil)
+ var v2 interface{} = &n.ns[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ns = n.ns[:l]
+ } else {
+ rvn = reflect.New(d.h.MapType).Elem()
+ d.decodeValue(rvn, nil)
+ }
+ case valueTypeArray:
+ // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp {
+ if d.stid == 0 || d.stid == intfSliceTypId {
+ l := len(n.ss)
+ n.ss = append(n.ss, nil)
+ var v2 interface{} = &n.ss[l]
+ d.decode(v2)
+ rvn = reflect.ValueOf(v2).Elem()
+ n.ss = n.ss[:l]
+ } else {
+ rvn = reflect.New(d.h.SliceType).Elem()
+ d.decodeValue(rvn, nil)
+ }
+ case valueTypeExt:
+ var v interface{}
+ tag, bytes := n.u, n.l // calling decode below might taint the values
+ if bytes == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ d.decode(v2)
+ v = *v2
+ n.is = n.is[:l]
+ }
+ bfn := d.h.getExtForTag(tag)
+ if bfn == nil {
+ var re RawExt
+ re.Tag = tag
+ re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+ rvn = reflect.ValueOf(re)
+ } else {
+ rvnA := reflect.New(bfn.rt)
+ rvn = rvnA.Elem()
+ if bytes != nil {
+ bfn.ext.ReadExt(rvnA.Interface(), bytes)
+ } else {
+ bfn.ext.UpdateExt(rvnA.Interface(), v)
+ }
+ }
+ case valueTypeNil:
+ // no-op
+ case valueTypeInt:
+ rvn = reflect.ValueOf(&n.i).Elem()
+ case valueTypeUint:
+ rvn = reflect.ValueOf(&n.u).Elem()
+ case valueTypeFloat:
+ rvn = reflect.ValueOf(&n.f).Elem()
+ case valueTypeBool:
+ rvn = reflect.ValueOf(&n.b).Elem()
+ case valueTypeString, valueTypeSymbol:
+ rvn = reflect.ValueOf(&n.s).Elem()
+ case valueTypeBytes:
+ rvn = reflect.ValueOf(&n.l).Elem()
+ case valueTypeTimestamp:
+ rvn = reflect.ValueOf(&n.t).Elem()
+ default:
+ panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v))
+ }
+ return
+}
+
+func (f *decFnInfo) kInterface(rv reflect.Value) {
+ // debugf("\t===> kInterface")
+
+ // Note:
+ // A consequence of how kInterface works, is that
+ // if an interface already contains something, we try
+ // to decode into what was there before.
+ // We do not replace with a generic value (as got from decodeNaked).
+
+ var rvn reflect.Value
+ if rv.IsNil() {
+ rvn = f.kInterfaceNaked()
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ }
+ } else if f.d.h.InterfaceReset {
+ rvn = f.kInterfaceNaked()
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ } else {
+ // reset to zero value based on current type in there.
+ rv.Set(reflect.Zero(rv.Elem().Type()))
+ }
+ } else {
+ rvn = rv.Elem()
+ // Note: interface{} is settable, but underlying type may not be.
+ // Consequently, we have to set the reflect.Value directly.
+ // if underlying type is settable (e.g. ptr or interface),
+ // we just decode into it.
+ // Else we create a settable value, decode into it, and set on the interface.
+ if rvn.CanSet() {
+ f.d.decodeValue(rvn, nil)
+ } else {
+ rvn2 := reflect.New(rvn.Type()).Elem()
+ rvn2.Set(rvn)
+ f.d.decodeValue(rvn2, nil)
+ rv.Set(rvn2)
+ }
+ }
+}
+
+func (f *decFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ d := f.d
+ dd := d.d
+ cr := d.cr
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeMap {
+ containerLen := dd.ReadMapStart()
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+ tisfi := fti.sfi
+ hasLen := containerLen >= 0
+ if hasLen {
+ for j := 0; j < containerLen; j++ {
+ // rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
+ // rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ } else {
+ d.structFieldNotFound(-1, rvkencname)
+ }
+ }
+ } else {
+ for j := 0; !dd.CheckBreak(); j++ {
+ // rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
+ // rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ } else {
+ d.structFieldNotFound(-1, rvkencname)
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else if ctyp == valueTypeArray {
+ containerLen := dd.ReadArrayStart()
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ return
+ }
+ // Not much gain from doing it two ways for array.
+ // Arrays are not used as much for structs.
+ hasLen := containerLen >= 0
+ for j, si := range fti.sfip {
+ if hasLen {
+ if j == containerLen {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(si.field(rv, true), nil)
+ }
+ }
+ if containerLen > len(fti.sfip) {
+ // read remaining values and throw away
+ for j := len(fti.sfip); j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ d.structFieldNotFound(j, "")
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ } else {
+ f.d.error(onlyMapOrArrayCanDecodeIntoStructErr)
+ return
+ }
+}
+
+func (f *decFnInfo) kSlice(rv reflect.Value) {
+ // A slice can be set from a map or array in stream.
+ // This way, the order can be kept (as order is lost with map).
+ ti := f.ti
+ d := f.d
+ dd := d.d
+ rtelem0 := ti.rt.Elem()
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeBytes || ctyp == valueTypeString {
+ // you can only decode bytes or string in the stream into a slice or array of bytes
+ if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
+ f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt)
+ }
+ if f.seq == seqTypeChan {
+ bs2 := dd.DecodeBytes(nil, false, true)
+ ch := rv.Interface().(chan<- byte)
+ for _, b := range bs2 {
+ ch <- b
+ }
+ } else {
+ rvbs := rv.Bytes()
+ bs2 := dd.DecodeBytes(rvbs, false, false)
+ if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
+ if rv.CanSet() {
+ rv.SetBytes(bs2)
+ } else {
+ copy(rvbs, bs2)
+ }
+ }
+ }
+ return
+ }
+
+ // array := f.seq == seqTypeChan
+
+ slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
+
+ // // an array can never return a nil slice. so no need to check f.array here.
+ if containerLenS == 0 {
+ if f.seq == seqTypeSlice {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
+ } else {
+ rv.SetLen(0)
+ }
+ } else if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeChan(ti.rt, 0))
+ }
+ }
+ slh.End()
+ return
+ }
+
+ rtelem := rtelem0
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ fn := d.getDecFn(rtelem, true, true)
+
+ var rv0, rv9 reflect.Value
+ rv0 = rv
+ rvChanged := false
+
+ // for j := 0; j < containerLenS; j++ {
+ var rvlen int
+ if containerLenS > 0 { // hasLen
+ if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ rv.Set(reflect.MakeChan(ti.rt, rvlen))
+ }
+ // handle chan specially:
+ for j := 0; j < containerLenS; j++ {
+ rv9 = reflect.New(rtelem0).Elem()
+ slh.ElemContainerState(j)
+ d.decodeValue(rv9, fn)
+ rv.Send(rv9)
+ }
+ } else { // slice or array
+ var truncated bool // says len of sequence is not same as expected number of elements
+ numToRead := containerLenS // if truncated, reset numToRead
+
+ rvcap := rv.Cap()
+ rvlen = rv.Len()
+ if containerLenS > rvcap {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, containerLenS)
+ } else {
+ oldRvlenGtZero := rvlen > 0
+ rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ if truncated {
+ if rvlen <= rvcap {
+ rv.SetLen(rvlen)
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
+ reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
+ }
+ rvcap = rvlen
+ }
+ numToRead = rvlen
+ } else if containerLenS != rvlen {
+ if f.seq == seqTypeSlice {
+ rv.SetLen(containerLenS)
+ rvlen = containerLenS
+ }
+ }
+ j := 0
+ // we read up to the numToRead
+ for ; j < numToRead; j++ {
+ slh.ElemContainerState(j)
+ d.decodeValue(rv.Index(j), fn)
+ }
+
+ // if slice, expand and read up to containerLenS (or EOF) iff truncated
+ // if array, swallow all the rest.
+
+ if f.seq == seqTypeArray {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ } else if truncated { // slice was truncated, as chan NOT in this block
+ for ; j < containerLenS; j++ {
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ slh.ElemContainerState(j)
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ } else {
+ rvlen = rv.Len()
+ j := 0
+ for ; !dd.CheckBreak(); j++ {
+ if f.seq == seqTypeChan {
+ slh.ElemContainerState(j)
+ rv9 = reflect.New(rtelem0).Elem()
+ d.decodeValue(rv9, fn)
+ rv.Send(rv9)
+ } else {
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= rvlen {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, j+1)
+ decodeIntoBlank = true
+ } else { // if f.seq == seqTypeSlice
+ // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0))
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ rvlen++
+ rvChanged = true
+ }
+ } else { // slice or array
+ rv9 = rv.Index(j)
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else { // seqTypeSlice
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ if f.seq == seqTypeSlice {
+ if j < rvlen {
+ rv.SetLen(j)
+ } else if j == 0 && rv.IsNil() {
+ rv = reflect.MakeSlice(ti.rt, 0, 0)
+ rvChanged = true
+ }
+ }
+ }
+ slh.End()
+
+ if rvChanged {
+ rv0.Set(rv)
+ }
+}
+
+func (f *decFnInfo) kArray(rv reflect.Value) {
+ // f.d.decodeValue(rv.Slice(0, rv.Len()))
+ f.kSlice(rv.Slice(0, rv.Len()))
+}
+
+func (f *decFnInfo) kMap(rv reflect.Value) {
+ d := f.d
+ dd := d.d
+ containerLen := dd.ReadMapStart()
+ cr := d.cr
+ ti := f.ti
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(ti.rt))
+ }
+
+ if containerLen == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+
+ ktype, vtype := ti.rt.Key(), ti.rt.Elem()
+ ktypeId := reflect.ValueOf(ktype).Pointer()
+ vtypeKind := vtype.Kind()
+ var keyFn, valFn *decFn
+ var xtyp reflect.Type
+ for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
+ }
+ keyFn = d.getDecFn(xtyp, true, true)
+ for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
+ }
+ valFn = d.getDecFn(xtyp, true, true)
+ var mapGet, mapSet bool
+ if !f.d.h.MapValueReset {
+ // if pointer, mapGet = true
+ // if interface, mapGet = true if !DecodeNakedAlways (else false)
+ // if builtin, mapGet = false
+ // else mapGet = true
+ if vtypeKind == reflect.Ptr {
+ mapGet = true
+ } else if vtypeKind == reflect.Interface {
+ if !f.d.h.InterfaceReset {
+ mapGet = true
+ }
+ } else if !isImmutableKind(vtypeKind) {
+ mapGet = true
+ }
+ }
+
+ var rvk, rvv, rvz reflect.Value
+
+ // for j := 0; j < containerLen; j++ {
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.decodeValue(rvk, keyFn)
+
+ // special case if a byte array.
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+ mapSet = true // set to false if we do a get, and it's a pointer, and it exists
+ if mapGet {
+ rvv = rv.MapIndex(rvk)
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.decodeValue(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ }
+ } else {
+ for j := 0; !dd.CheckBreak(); j++ {
+ rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.decodeValue(rvk, keyFn)
+
+ // special case if a byte array.
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+ mapSet = true // set to false if we do a get, and it's a pointer, and it exists
+ if mapGet {
+ rvv = rv.MapIndex(rvk)
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.decodeValue(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+type decRtidFn struct {
+ rtid uintptr
+ fn decFn
+}
+
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+type decNaked struct {
+ // r RawExt // used for RawExt, uint, []byte.
+ u uint64
+ i int64
+ f float64
+ l []byte
+ s string
+ t time.Time
+ b bool
+ v valueType
+
+ // stacks for reducing allocation
+ is []interface{}
+ ms []map[interface{}]interface{}
+ ns []map[string]interface{}
+ ss [][]interface{}
+ // rs []RawExt
+
+ // keep arrays at the bottom? Chances are that they are not used much.
+ ia [4]interface{}
+ ma [4]map[interface{}]interface{}
+ na [4]map[string]interface{}
+ sa [4][]interface{}
+ // ra [2]RawExt
+}
+
+func (n *decNaked) reset() {
+ if n.ss != nil {
+ n.ss = n.ss[:0]
+ }
+ if n.is != nil {
+ n.is = n.is[:0]
+ }
+ if n.ms != nil {
+ n.ms = n.ms[:0]
+ }
+ if n.ns != nil {
+ n.ns = n.ns[:0]
+ }
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+ // hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+ // Try to put things that go together to fit within a cache line (8 words).
+
+ d decDriver
+ // NOTE: Decoder shouldn't call its read methods,
+ // as the handler MAY need to do some coordination.
+ r decReader
+ // sa [initCollectionCap]decRtidFn
+ h *BasicHandle
+ hh Handle
+
+ be bool // is binary encoding
+ bytes bool // is bytes reader
+ js bool // is json handle
+
+ rb bytesDecReader
+ ri ioDecReader
+ cr containerStateRecv
+
+ s []decRtidFn
+ f map[uintptr]*decFn
+
+ // _ uintptr // for alignment purposes, so next one starts from a cache line
+
+ // cache the mapTypeId and sliceTypeId for faster comparisons
+ mtid uintptr
+ stid uintptr
+
+ n decNaked
+ b [scratchByteArrayLen]byte
+ is map[string]string // used for interning strings
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.Reset(r)
+ return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.ResetBytes(in)
+ return d
+}
+
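+// exampleDecodeSketch is an illustrative sketch added for documentation only;
+// it is not part of the upstream ugorji/go codec sources. It shows the two
+// common entry points above: decoding a byte slice into a typed destination
+// (here a map, so the stream must actually contain a map) and into a nil
+// interface{}, letting the decoder pick a concrete type. The Handle is
+// supplied by the caller (e.g. a *JsonHandle).
+func exampleDecodeSketch(data []byte, h Handle) (asMap map[string]interface{}, asAny interface{}, err error) {
+	// Decode into a typed destination.
+	if err = NewDecoderBytes(data, h).Decode(&asMap); err != nil {
+		return
+	}
+	// Decode into a nil interface{}: the decoder chooses the value type.
+	err = NewDecoderBytes(data, h).Decode(&asAny)
+	return
+}
+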
+func newDecoder(h Handle) *Decoder {
+ d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ n := &d.n
+ // n.rs = n.ra[:0]
+ n.ms = n.ma[:0]
+ n.is = n.ia[:0]
+ n.ns = n.na[:0]
+ n.ss = n.sa[:0]
+ _, d.js = h.(*JsonHandle)
+ if d.h.InternString {
+ d.is = make(map[string]string, 32)
+ }
+ d.d = h.newDecDriver(d)
+ d.cr, _ = d.d.(containerStateRecv)
+ // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri})
+ return d
+}
+
+func (d *Decoder) resetCommon() {
+ d.n.reset()
+ d.d.reset()
+ // reset all things which were cached from the Handle,
+ // but could be changed.
+ d.mtid, d.stid = 0, 0
+ if d.h.MapType != nil {
+ d.mtid = reflect.ValueOf(d.h.MapType).Pointer()
+ }
+ if d.h.SliceType != nil {
+ d.stid = reflect.ValueOf(d.h.SliceType).Pointer()
+ }
+}
+
+func (d *Decoder) Reset(r io.Reader) {
+ d.ri.x = &d.b
+ // d.s = d.sa[:0]
+ d.ri.bs.r = r
+ var ok bool
+ d.ri.br, ok = r.(decReaderByteScanner)
+ if !ok {
+ d.ri.br = &d.ri.bs
+ }
+ d.r = &d.ri
+ d.resetCommon()
+}
+
+func (d *Decoder) ResetBytes(in []byte) {
+ // d.s = d.sa[:0]
+ d.rb.reset(in)
+ d.r = &d.rb
+ d.resetCommon()
+}
+
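+// exampleDecoderReuseSketch is an illustrative sketch (added for documentation;
+// not part of the upstream sources). It shows reusing a single Decoder across
+// multiple byte slices via ResetBytes, so the cached decode functions and
+// scratch buffers held on the Decoder are reused instead of re-allocated.
+func exampleDecoderReuseSketch(payloads [][]byte, h Handle) ([]interface{}, error) {
+	d := NewDecoderBytes(nil, h)
+	out := make([]interface{}, 0, len(payloads))
+	for _, p := range payloads {
+		d.ResetBytes(p)
+		var v interface{}
+		if err := d.Decode(&v); err != nil {
+			return nil, err
+		}
+		out = append(out, v)
+	}
+	return out, nil
+}
+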
+// func (d *Decoder) sendContainerState(c containerState) {
+// if d.cr != nil {
+// d.cr.sendContainerState(c)
+// }
+// }
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+// // Decoding into a non-nil typed value
+// var f float32
+// err = codec.NewDecoder(r, handle).Decode(&f)
+//
+// // Decoding into nil interface
+// var v interface{}
+// dec := codec.NewDecoder(r, handle)
+// err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+// - Numbers are decoded as float64, int64 or uint64.
+// - Other values are decoded appropriately depending on the type:
+// bool, string, []byte, time.Time, etc
+// - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of decoding is based on the
+// type of the value. When a value is seen:
+// - If an extension is registered for it, call that extension function
+// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+// - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+// - A map can be decoded from a stream map, by updating matching keys.
+// - A slice can be decoded from a stream array,
+// by updating the first n elements, where n is length of the stream.
+// - A slice can be decoded from a stream map, by decoding as if
+// it contains a sequence of key-value pairs.
+// - A struct can be decoded from a stream map, by updating matching fields.
+// - A struct can be decoded from a stream array,
+// by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+func (d *Decoder) Decode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ d.decode(v)
+ return
+}
+
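+// exampleDecodeUpdateSketch is an illustrative sketch (not part of the upstream
+// sources). It demonstrates the "update the container" rules documented on
+// Decode above: decoding a stream array into an existing slice updates its
+// leading elements rather than replacing the slice wholesale.
+func exampleDecodeUpdateSketch(data []byte, h Handle) ([]int, error) {
+	existing := []int{10, 20, 30}
+	err := NewDecoderBytes(data, h).Decode(&existing)
+	return existing, err
+}
+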
+// this is not a smart swallow, as it allocates objects and does unnecessary work.
+func (d *Decoder) swallowViaHammer() {
+ var blank interface{}
+ d.decodeValue(reflect.ValueOf(&blank).Elem(), nil)
+}
+
+func (d *Decoder) swallow() {
+ // smarter decode that just swallows the content
+ dd := d.d
+ if dd.TryDecodeAsNil() {
+ return
+ }
+ cr := d.cr
+ switch dd.ContainerType() {
+ case valueTypeMap:
+ containerLen := dd.ReadMapStart()
+ clenGtEqualZero := containerLen >= 0
+ for j := 0; ; j++ {
+ if clenGtEqualZero {
+ if j >= containerLen {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ d.swallow()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ d.swallow()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ case valueTypeArray:
+ containerLenS := dd.ReadArrayStart()
+ clenGtEqualZero := containerLenS >= 0
+ for j := 0; ; j++ {
+ if clenGtEqualZero {
+ if j >= containerLenS {
+ break
+ }
+ } else if dd.CheckBreak() {
+ break
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ d.swallow()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ case valueTypeBytes:
+ dd.DecodeBytes(d.b[:], false, true)
+ case valueTypeString:
+ dd.DecodeBytes(d.b[:], true, true)
+ // dd.DecodeStringAsBytes(d.b[:])
+ default:
+ // these are all primitives, which we can get from decodeNaked
+ // if RawExt using Value, complete the processing.
+ dd.DecodeNaked()
+ if n := &d.n; n.v == valueTypeExt && n.l == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ d.decode(v2)
+ n.is = n.is[:l]
+ }
+ }
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+ d.decode(v)
+}
+
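+// exampleMustDecodeRecoverSketch is an illustrative sketch (not part of the
+// upstream sources). MustDecode panics on failure, so callers that prefer an
+// error can recover the panic as shown here.
+func exampleMustDecodeRecoverSketch(d *Decoder, v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = fmt.Errorf("MustDecode panicked: %v", r)
+		}
+	}()
+	d.MustDecode(v)
+	return
+}
+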
+func (d *Decoder) decode(iv interface{}) {
+ // if ics, ok := iv.(Selfer); ok {
+ // ics.CodecDecodeSelf(d)
+ // return
+ // }
+
+ if d.d.TryDecodeAsNil() {
+ switch v := iv.(type) {
+ case nil:
+ case *string:
+ *v = ""
+ case *bool:
+ *v = false
+ case *int:
+ *v = 0
+ case *int8:
+ *v = 0
+ case *int16:
+ *v = 0
+ case *int32:
+ *v = 0
+ case *int64:
+ *v = 0
+ case *uint:
+ *v = 0
+ case *uint8:
+ *v = 0
+ case *uint16:
+ *v = 0
+ case *uint32:
+ *v = 0
+ case *uint64:
+ *v = 0
+ case *float32:
+ *v = 0
+ case *float64:
+ *v = 0
+ case *[]uint8:
+ *v = nil
+ case reflect.Value:
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
+ v = v.Elem()
+ if v.IsValid() {
+ v.Set(reflect.Zero(v.Type()))
+ }
+ default:
+ rv := reflect.ValueOf(iv)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
+ rv = rv.Elem()
+ if rv.IsValid() {
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ }
+ return
+ }
+
+ switch v := iv.(type) {
+ case nil:
+ d.error(cannotDecodeIntoNilErr)
+ return
+
+ case Selfer:
+ v.CodecDecodeSelf(d)
+
+ case reflect.Value:
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
+ d.decodeValueNotNil(v.Elem(), nil)
+
+ case *string:
+
+ *v = d.d.DecodeString()
+ case *bool:
+ *v = d.d.DecodeBool()
+ case *int:
+ *v = int(d.d.DecodeInt(intBitsize))
+ case *int8:
+ *v = int8(d.d.DecodeInt(8))
+ case *int16:
+ *v = int16(d.d.DecodeInt(16))
+ case *int32:
+ *v = int32(d.d.DecodeInt(32))
+ case *int64:
+ *v = d.d.DecodeInt(64)
+ case *uint:
+ *v = uint(d.d.DecodeUint(uintBitsize))
+ case *uint8:
+ *v = uint8(d.d.DecodeUint(8))
+ case *uint16:
+ *v = uint16(d.d.DecodeUint(16))
+ case *uint32:
+ *v = uint32(d.d.DecodeUint(32))
+ case *uint64:
+ *v = d.d.DecodeUint(64)
+ case *float32:
+ *v = float32(d.d.DecodeFloat(true))
+ case *float64:
+ *v = d.d.DecodeFloat(false)
+ case *[]uint8:
+ *v = d.d.DecodeBytes(*v, false, false)
+
+ case *interface{}:
+ d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil)
+
+ default:
+ if !fastpathDecodeTypeSwitch(iv, d) {
+ d.decodeI(iv, true, false, false, false)
+ }
+ }
+}
+
+func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) {
+ if tryNil && d.d.TryDecodeAsNil() {
+ // No need to check recursively whether it is a pointer,
+ // to determine whether to set the value to nil.
+ // Just always set the value to its zero value.
+ if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ return
+ }
+
+ // If the stream does not contain a nil value, we can deref to the base
+ // non-pointer value and decode into that.
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ return rv, true
+}
+
+func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) {
+ rv := reflect.ValueOf(iv)
+ if checkPtr {
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
+ }
+ rv, proceed := d.preDecodeValue(rv, tryNil)
+ if proceed {
+ fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer)
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) {
+ if rv, proceed := d.preDecodeValue(rv, true); proceed {
+ if fn == nil {
+ fn = d.getDecFn(rv.Type(), true, true)
+ }
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) {
+ if rv, proceed := d.preDecodeValue(rv, false); proceed {
+ if fn == nil {
+ fn = d.getDecFn(rv.Type(), true, true)
+ }
+ fn.f(&fn.i, rv)
+ }
+}
+
+func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) {
+ rtid := reflect.ValueOf(rt).Pointer()
+
+ // retrieve or register a focused function for this type,
+ // to eliminate the need to do the retrieval multiple times
+
+ // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) }
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = d.f[rtid]
+ } else {
+ for i := range d.s {
+ v := &(d.s[i])
+ if v.rtid == rtid {
+ fn, ok = &(v.fn), true
+ break
+ }
+ }
+ }
+ if ok {
+ return
+ }
+
+ if useMapForCodecCache {
+ if d.f == nil {
+ d.f = make(map[uintptr]*decFn, initCollectionCap)
+ }
+ fn = new(decFn)
+ d.f[rtid] = fn
+ } else {
+ if d.s == nil {
+ d.s = make([]decRtidFn, 0, initCollectionCap)
+ }
+ d.s = append(d.s, decRtidFn{rtid: rtid})
+ fn = &(d.s[len(d.s)-1]).fn
+ }
+
+ // debugf("\tCreating new dec fn for type: %v\n", rt)
+ ti := d.h.getTypeInfo(rtid, rt)
+ fi := &(fn.i)
+ fi.d = d
+ fi.ti = ti
+
+ // An extension can be registered for any type, regardless of the Kind
+ // (e.g. type BitSet int64, type MyStruct struct { /* unexported fields */ }, type X []int, etc.).
+ //
+ // We can't check if it's an extension byte here first, because the user may have
+ // registered a pointer or non-pointer type, meaning we may have to recurse first
+ // before matching a mapped type, even though the extension byte is already detected.
+ //
+ // NOTE: if decoding into a nil interface{}, we return a non-nil
+ // value even if the container registers a length of 0.
+ if checkCodecSelfer && ti.cs {
+ fn.f = (*decFnInfo).selferUnmarshal
+ } else if rtid == rawExtTypId {
+ fn.f = (*decFnInfo).rawExt
+ } else if d.d.IsBuiltinType(rtid) {
+ fn.f = (*decFnInfo).builtin
+ } else if xfFn := d.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.f = (*decFnInfo).ext
+ } else if supportMarshalInterfaces && d.be && ti.bunm {
+ fn.f = (*decFnInfo).binaryUnmarshal
+ } else if supportMarshalInterfaces && !d.be && d.js && ti.junm {
+ // If JSON, we should check JSONUnmarshal before textUnmarshal
+ fn.f = (*decFnInfo).jsonUnmarshal
+ } else if supportMarshalInterfaces && !d.be && ti.tunm {
+ fn.f = (*decFnInfo).textUnmarshal
+ } else {
+ rk := rt.Kind()
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if rt.PkgPath() == "" {
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.f = fastpathAV[idx].decfn
+ }
+ } else {
+ // use mapping for underlying type if there
+ ok = false
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(rt.Key(), rt.Elem())
+ } else {
+ rtu = reflect.SliceOf(rt.Elem())
+ }
+ rtuid := reflect.ValueOf(rtu).Pointer()
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].decfn
+ xrt := fastpathAV[idx].rt
+ fn.f = func(xf *decFnInfo, xrv reflect.Value) {
+ // xfnf(xf, xrv.Convert(xrt))
+ xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem())
+ }
+ }
+ }
+ }
+ if fn.f == nil {
+ switch rk {
+ case reflect.String:
+ fn.f = (*decFnInfo).kString
+ case reflect.Bool:
+ fn.f = (*decFnInfo).kBool
+ case reflect.Int:
+ fn.f = (*decFnInfo).kInt
+ case reflect.Int64:
+ fn.f = (*decFnInfo).kInt64
+ case reflect.Int32:
+ fn.f = (*decFnInfo).kInt32
+ case reflect.Int8:
+ fn.f = (*decFnInfo).kInt8
+ case reflect.Int16:
+ fn.f = (*decFnInfo).kInt16
+ case reflect.Float32:
+ fn.f = (*decFnInfo).kFloat32
+ case reflect.Float64:
+ fn.f = (*decFnInfo).kFloat64
+ case reflect.Uint8:
+ fn.f = (*decFnInfo).kUint8
+ case reflect.Uint64:
+ fn.f = (*decFnInfo).kUint64
+ case reflect.Uint:
+ fn.f = (*decFnInfo).kUint
+ case reflect.Uint32:
+ fn.f = (*decFnInfo).kUint32
+ case reflect.Uint16:
+ fn.f = (*decFnInfo).kUint16
+ // case reflect.Ptr:
+ // fn.f = (*decFnInfo).kPtr
+ case reflect.Uintptr:
+ fn.f = (*decFnInfo).kUintptr
+ case reflect.Interface:
+ fn.f = (*decFnInfo).kInterface
+ case reflect.Struct:
+ fn.f = (*decFnInfo).kStruct
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.f = (*decFnInfo).kArray
+ case reflect.Map:
+ fn.f = (*decFnInfo).kMap
+ default:
+ fn.f = (*decFnInfo).kErr
+ }
+ }
+ }
+
+ return
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+ if d.h.ErrorIfNoField {
+ if index >= 0 {
+ d.errorf("no matching struct field found when decoding stream array at index %v", index)
+ return
+ } else if rvkencname != "" {
+ d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname)
+ return
+ }
+ }
+ d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+ if d.h.ErrorIfNoArrayExpand {
+ d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+ }
+}
+
+func (d *Decoder) chkPtrValue(rv reflect.Value) {
+ // We can only decode into a non-nil pointer
+ if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+ return
+ }
+ d.errNotValidPtrValue(rv)
+}
+
+func (d *Decoder) errNotValidPtrValue(rv reflect.Value) {
+ if !rv.IsValid() {
+ d.error(cannotDecodeIntoNilErr)
+ return
+ }
+ if !rv.CanInterface() {
+ d.errorf("cannot decode into a value without an interface: %v", rv)
+ return
+ }
+ rvi := rv.Interface()
+ d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi)
+}
+
+func (d *Decoder) error(err error) {
+ panic(err)
+}
+
+func (d *Decoder) errorf(format string, params ...interface{}) {
+ params2 := make([]interface{}, len(params)+1)
+ params2[0] = d.r.numread()
+ copy(params2[1:], params)
+ err := fmt.Errorf("[pos %d]: "+format, params2...)
+ panic(err)
+}
+
+func (d *Decoder) string(v []byte) (s string) {
+ if d.is != nil {
+ s, ok := d.is[string(v)] // no allocation here.
+ if !ok {
+ s = string(v)
+ d.is[s] = s
+ }
+ return s
+ }
+ return string(v) // don't return stringView, as we need a real string here.
+}
+
+func (d *Decoder) intern(s string) {
+ if d.is != nil {
+ d.is[s] = s
+ }
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() []byte {
+ d.d.uncacheRead()
+ d.r.track()
+ d.swallow()
+ return d.r.stopTrack()
+}
+
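+// exampleNextValueBytesSketch is an illustrative sketch (not part of the
+// upstream sources). It captures the raw encoded bytes of the next value in a
+// stream, without fully decoding it, using nextValueBytes above.
+func exampleNextValueBytesSketch(data []byte, h Handle) []byte {
+	return NewDecoderBytes(data, h).nextValueBytes()
+}
+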
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
+type decSliceHelper struct {
+ d *Decoder
+ // ct valueType
+ array bool
+}
+
+func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
+ dd := d.d
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeArray {
+ x.array = true
+ clen = dd.ReadArrayStart()
+ } else if ctyp == valueTypeMap {
+ clen = dd.ReadMapStart() * 2
+ } else {
+ d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
+ }
+ // x.ct = ctyp
+ x.d = d
+ return
+}
+
+func (x decSliceHelper) End() {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayEnd)
+ } else {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (x decSliceHelper) ElemContainerState(index int) {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayElem)
+ } else {
+ if index%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+}
+
+func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) {
+ if clen == 0 {
+ return zeroByteSlice
+ }
+ if len(bs) == clen {
+ bsOut = bs
+ } else if cap(bs) >= clen {
+ bsOut = bs[:clen]
+ } else {
+ bsOut = make([]byte, clen)
+ }
+ r.readb(bsOut)
+ return
+}
+
+func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
+ if xlen := len(in); xlen > 0 {
+ if isBytesReader || xlen <= scratchByteArrayLen {
+ if cap(dest) >= xlen {
+ out = dest[:xlen]
+ } else {
+ out = make([]byte, xlen)
+ }
+ copy(out, in)
+ return
+ }
+ }
+ return in
+}
+
+// decInferLen will infer a sensible length, given the following:
+// - clen: length wanted.
+// - maxlen: max length to be returned.
+// if <= 0, it is unset, and we infer it based on the unit size
+// - unit: number of bytes for each element of the collection
+func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ // handle when maxlen is not set i.e. <= 0
+ if clen <= 0 {
+ return
+ }
+ if maxlen <= 0 {
+ // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items.
+ // maxlen = 256 * 1024 / unit
+ // if maxlen < (4 * 1024) {
+ // maxlen = 4 * 1024
+ // }
+ if unit < (256 / 4) {
+ maxlen = 256 * 1024 / unit
+ } else {
+ maxlen = 4 * 1024
+ }
+ }
+ if clen > maxlen {
+ rvlen = maxlen
+ truncated = true
+ } else {
+ rvlen = clen
+ }
+ return
+ // if clen <= 0 {
+ // rvlen = 0
+ // } else if maxlen > 0 && clen > maxlen {
+ // rvlen = maxlen
+ // truncated = true
+ // } else {
+ // rvlen = clen
+ // }
+ // return
+}
+
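+// exampleDecInferLenSketch is an illustrative sketch (not part of the upstream
+// sources) showing the inference above with concrete numbers: with no maxlen
+// configured and an 8-byte element unit, maxlen defaults to 256*1024/8 = 32768,
+// so a claimed stream length of 1000000 is truncated to 32768.
+func exampleDecInferLenSketch() (rvlen int, truncated bool) {
+	return decInferLen(1000000, 0, 8) // returns (32768, true)
+}
+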
+// // implement overall decReader wrapping both, for possible use inline:
+// type decReaderT struct {
+// bytes bool
+// rb *bytesDecReader
+// ri *ioDecReader
+// }
+//
+// // implement *Decoder as a decReader.
+// // Using decReaderT (defined just above) caused performance degradation
+// // possibly because of constant copying the value,
+// // and some value->interface conversion causing allocation.
+// func (d *Decoder) unreadn1() {
+// if d.bytes {
+// d.rb.unreadn1()
+// } else {
+// d.ri.unreadn1()
+// }
+// }
+// ... for other methods of decReader.
+// Testing showed that performance improvement was negligible.
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go
new file mode 100644
index 0000000..a874c74
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/encode.go
@@ -0,0 +1,1419 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+const (
+ defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+)
+
+// AsSymbolFlag defines what should be encoded as symbols.
+type AsSymbolFlag uint8
+
+const (
+ // AsSymbolDefault is default.
+ // Currently, this means only encode struct field names as symbols.
+ // The default is subject to change.
+ AsSymbolDefault AsSymbolFlag = iota
+
+ // AsSymbolAll means encode anything which could be a symbol as a symbol.
+ AsSymbolAll = 0xfe
+
+ // AsSymbolNone means do not encode anything as a symbol.
+ AsSymbolNone = 1 << iota
+
+ // AsSymbolMapStringKeysFlag means encode keys in map[string]XXX as symbols.
+ AsSymbolMapStringKeysFlag
+
+ // AsSymbolStructFieldNameFlag means encode struct field names as symbols.
+ AsSymbolStructFieldNameFlag
+)
+
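+// exampleAsSymbolsSketch is an illustrative sketch (not part of the upstream
+// sources). It shows combining the flag values above into the AsSymbols field
+// of EncodeOptions (defined later in this file); in practice the options are
+// typically reached through the concrete Handle, an assumption not shown here.
+func exampleAsSymbolsSketch() EncodeOptions {
+	var opts EncodeOptions
+	opts.AsSymbols = AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+	return opts
+}
+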
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ IsBuiltinType(rt uintptr) bool
+ EncodeBuiltin(rt uintptr, v interface{})
+ EncodeNil()
+ EncodeInt(i int64)
+ EncodeUint(i uint64)
+ EncodeBool(b bool)
+ EncodeFloat32(f float32)
+ EncodeFloat64(f float64)
+ // encodeExtPreamble(xtag byte, length int)
+ EncodeRawExt(re *RawExt, e *Encoder)
+ EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+ EncodeArrayStart(length int)
+ EncodeMapStart(length int)
+ EncodeString(c charEncoding, v string)
+ EncodeSymbol(v string)
+ EncodeStringBytes(c charEncoding, v []byte)
+ //TODO
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+
+ reset()
+}
+
+type encDriverAsis interface {
+ EncodeAsis(v []byte)
+}
+
+type encNoSeparator struct{}
+
+func (_ encNoSeparator) EncodeEnd() {}
+
+type ioEncWriterWriter interface {
+ WriteByte(c byte) error
+ WriteString(s string) (n int, err error)
+ Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type EncodeOptions struct {
+ // Encode a struct as an array, and not as a map
+ StructToArray bool
+
+ // Canonical representation means that encoding a value will always result in the same
+ // sequence of bytes.
+ //
+ // This only affects maps, as the iteration order for maps is random.
+ //
+ // The implementation MAY use the natural sort order for the map keys if possible:
+ //
+ // - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+ // then the map keys are first sorted in natural order and then written
+ // with the corresponding map values to the stream.
+ // - If there is no natural sort order, then the map keys will first be
+ // encoded into []byte, and then sorted,
+ // before writing the sorted keys and the corresponding map values to the stream.
+ //
+ Canonical bool
+
+ // CheckCircularRef controls whether we check for circular references
+ // and error fast during an encode.
+ //
+ // If enabled, an error is received if a pointer to a struct
+ // references itself either directly or through one of its fields (iteratively).
+ //
+ // This is opt-in, as there may be a performance hit to checking circular references.
+ CheckCircularRef bool
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase when using symbols, because string comparisons have a clear cost.
+ //
+ // Sample values:
+ // AsSymbolNone
+ // AsSymbolAll
+ // AsSymbolMapStringKeys
+ // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+ AsSymbols AsSymbolFlag
+}
+
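+// exampleCanonicalOptionsSketch is an illustrative sketch (not part of the
+// upstream sources). It builds an EncodeOptions value enabling the
+// deterministic map-key ordering (Canonical) and struct-as-array encoding
+// documented above.
+func exampleCanonicalOptionsSketch() EncodeOptions {
+	return EncodeOptions{Canonical: true, StructToArray: true}
+}
+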
+// ---------------------------------------------
+
+type simpleIoEncWriterWriter struct {
+ w io.Writer
+ bw io.ByteWriter
+ sw ioEncStringWriter
+}
+
+func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
+ if o.bw != nil {
+ return o.bw.WriteByte(c)
+ }
+ _, err = o.w.Write([]byte{c})
+ return
+}
+
+func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
+ if o.sw != nil {
+ return o.sw.WriteString(s)
+ }
+ // return o.w.Write([]byte(s))
+ return o.w.Write(bytesView(s))
+}
+
+func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
+ return o.w.Write(p)
+}
+
+// ----------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+ w ioEncWriterWriter
+ s simpleIoEncWriterWriter
+ // x [8]byte // temp byte array re-used internally for efficiency
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := z.w.Write(bs)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(bs) {
+ panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n))
+ }
+}
+
+func (z *ioEncWriter) writestr(s string) {
+ n, err := z.w.WriteString(s)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(s) {
+ panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n))
+ }
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+ if err := z.w.WriteByte(b); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
+ z.writen1(b1)
+ z.writen1(b2)
+}
+
+func (z *ioEncWriter) atEndOfEncode() {}
+
+// ----------------------------------------
+
+// bytesEncWriter implements encWriter and can write to a byte slice.
+// It is used by the Marshal function.
+type bytesEncWriter struct {
+ b []byte
+ c int // cursor
+ out *[]byte // write out on atEndOfEncode
+}
+
+func (z *bytesEncWriter) writeb(s []byte) {
+ if len(s) > 0 {
+ c := z.grow(len(s))
+ copy(z.b[c:], s)
+ }
+}
+
+func (z *bytesEncWriter) writestr(s string) {
+ if len(s) > 0 {
+ c := z.grow(len(s))
+ copy(z.b[c:], s)
+ }
+}
+
+func (z *bytesEncWriter) writen1(b1 byte) {
+ c := z.grow(1)
+ z.b[c] = b1
+}
+
+func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
+ c := z.grow(2)
+ z.b[c] = b1
+ z.b[c+1] = b2
+}
+
+func (z *bytesEncWriter) atEndOfEncode() {
+ *(z.out) = z.b[:z.c]
+}
+
+func (z *bytesEncWriter) grow(n int) (oldcursor int) {
+ oldcursor = z.c
+ z.c = oldcursor + n
+ if z.c > len(z.b) {
+ if z.c > cap(z.b) {
+ // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
+ // bytes.Buffer model (2*cap + n): much better
+ // bs := make([]byte, 2*cap(z.b)+n)
+ bs := make([]byte, growCap(cap(z.b), 1, n))
+ copy(bs, z.b[:oldcursor])
+ z.b = bs
+ } else {
+ z.b = z.b[:cap(z.b)]
+ }
+ }
+ return
+}
+
+// ---------------------------------------------
+
+type encFnInfo struct {
+ e *Encoder
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+}
+
+func (f *encFnInfo) builtin(rv reflect.Value) {
+ f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
+}
+
+func (f *encFnInfo) rawExt(rv reflect.Value) {
+ // rev := rv.Interface().(RawExt)
+ // f.e.e.EncodeRawExt(&rev, f.e)
+ var re *RawExt
+ if rv.CanAddr() {
+ re = rv.Addr().Interface().(*RawExt)
+ } else {
+ rev := rv.Interface().(RawExt)
+ re = &rev
+ }
+ f.e.e.EncodeRawExt(re, f.e)
+}
+
+func (f *encFnInfo) ext(rv reflect.Value) {
+ // if this is a struct|array and it was addressable, then pass the address directly (not the value)
+ if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() {
+ rv = rv.Addr()
+ }
+ f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e)
+}
+
+func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) {
+ if indir == 0 {
+ v = rv.Interface()
+ } else if indir == -1 {
+ // If a non-pointer was passed to Encode(), then that value is not addressable.
+ // Take the addr if addressable, else copy the value to an addressable value.
+ if rv.CanAddr() {
+ v = rv.Addr().Interface()
+ } else {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v)
+ }
+ } else {
+ for j := int8(0); j < indir; j++ {
+ if rv.IsNil() {
+ f.e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ }
+ v = rv.Interface()
+ }
+ return v, true
+}
+
+func (f *encFnInfo) selferMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed {
+ v.(Selfer).CodecEncodeSelf(f.e)
+ }
+}
+
+func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed {
+ bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+ }
+}
+
+func (f *encFnInfo) textMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed {
+ // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface())
+ bs, fnerr := v.(encoding.TextMarshaler).MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+ }
+}
+
+func (f *encFnInfo) jsonMarshal(rv reflect.Value) {
+ if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed {
+ bs, fnerr := v.(jsonMarshaler).MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+ }
+}
+
+func (f *encFnInfo) kBool(rv reflect.Value) {
+ f.e.e.EncodeBool(rv.Bool())
+}
+
+func (f *encFnInfo) kString(rv reflect.Value) {
+ f.e.e.EncodeString(c_UTF8, rv.String())
+}
+
+func (f *encFnInfo) kFloat64(rv reflect.Value) {
+ f.e.e.EncodeFloat64(rv.Float())
+}
+
+func (f *encFnInfo) kFloat32(rv reflect.Value) {
+ f.e.e.EncodeFloat32(float32(rv.Float()))
+}
+
+func (f *encFnInfo) kInt(rv reflect.Value) {
+ f.e.e.EncodeInt(rv.Int())
+}
+
+func (f *encFnInfo) kUint(rv reflect.Value) {
+ f.e.e.EncodeUint(rv.Uint())
+}
+
+func (f *encFnInfo) kInvalid(rv reflect.Value) {
+ f.e.e.EncodeNil()
+}
+
+func (f *encFnInfo) kErr(rv reflect.Value) {
+ f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
+}
+
+func (f *encFnInfo) kSlice(rv reflect.Value) {
+ ti := f.ti
+ // array may be non-addressable, so we have to manage with care
+ // (don't call rv.Bytes, rv.Slice, etc).
+ // E.g. type S struct{ B [2]byte };
+ // Encode(S{}) will bomb on "panic: slice of unaddressable array".
+ e := f.e
+ if f.seq != seqTypeArray {
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ // If in this method, then there was no extension function defined.
+ // So it's okay to treat as []byte.
+ if ti.rtid == uint8SliceTypId {
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ return
+ }
+ }
+ cr := e.cr
+ rtelem := ti.rt.Elem()
+ l := rv.Len()
+ if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 {
+ switch f.seq {
+ case seqTypeArray:
+ // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else
+ if rv.CanAddr() {
+ e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
+ } else {
+ var bs []byte
+ if l <= cap(e.b) {
+ bs = e.b[:l]
+ } else {
+ bs = make([]byte, l)
+ }
+ reflect.Copy(reflect.ValueOf(bs), rv)
+ // TODO: Test that reflect.Copy works instead of manual one-by-one
+ // for i := 0; i < l; i++ {
+ // bs[i] = byte(rv.Index(i).Uint())
+ // }
+ e.e.EncodeStringBytes(c_RAW, bs)
+ }
+ case seqTypeSlice:
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ case seqTypeChan:
+ bs := e.b[:0]
+ // do not use range, so that the number of elements encoded
+ // does not change, and encoding does not hang waiting on someone to close chan.
+ // for b := range rv.Interface().(<-chan byte) {
+ // bs = append(bs, b)
+ // }
+ ch := rv.Interface().(<-chan byte)
+ for i := 0; i < l; i++ {
+ bs = append(bs, <-ch)
+ }
+ e.e.EncodeStringBytes(c_RAW, bs)
+ }
+ return
+ }
+
+ if ti.mbs {
+ if l%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", l)
+ return
+ }
+ e.e.EncodeMapStart(l / 2)
+ } else {
+ e.e.EncodeArrayStart(l)
+ }
+
+ if l > 0 {
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ // if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var fn *encFn
+ if rtelem.Kind() != reflect.Interface {
+ rtelemid := reflect.ValueOf(rtelem).Pointer()
+ fn = e.getEncFn(rtelemid, rtelem, true, true)
+ }
+ // TODO: Consider perf implication of encoding odd index values as symbols if type is string
+ for j := 0; j < l; j++ {
+ if cr != nil {
+ if ti.mbs {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ } else {
+ cr.sendContainerState(containerArrayElem)
+ }
+ }
+ if f.seq == seqTypeChan {
+ if rv2, ok2 := rv.Recv(); ok2 {
+ e.encodeValue(rv2, fn)
+ } else {
+ e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received.
+ }
+ } else {
+ e.encodeValue(rv.Index(j), fn)
+ }
+ }
+ }
+
+ if cr != nil {
+ if ti.mbs {
+ cr.sendContainerState(containerMapEnd)
+ } else {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
+}
+
+func (f *encFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ e := f.e
+ cr := e.cr
+ tisfi := fti.sfip
+ toMap := !(fti.toArray || e.h.StructToArray)
+ newlen := len(fti.sfi)
+
+ // Use sync.Pool to reduce allocating slices unnecessarily.
+ // The cost of sync.Pool is less than the cost of new allocation.
+ pool, poolv, fkvs := encStructPoolGet(newlen)
+
+ // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+ if toMap {
+ tisfi = fti.sfi
+ }
+ newlen = 0
+ var kv stringRv
+ for _, si := range tisfi {
+ kv.r = si.field(rv, false)
+ if toMap {
+ if si.omitEmpty && isEmptyValue(kv.r) {
+ continue
+ }
+ kv.v = si.encName
+ } else {
+ // use the zero value.
+ // if a reference or struct, set to nil (so you do not output too much)
+ if si.omitEmpty && isEmptyValue(kv.r) {
+ switch kv.r.Kind() {
+ case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
+ reflect.Map, reflect.Slice:
+ kv.r = reflect.Value{} //encode as nil
+ }
+ }
+ }
+ fkvs[newlen] = kv
+ newlen++
+ }
+
+ // debugf(">>>> kStruct: newlen: %v", newlen)
+ // sep := !e.be
+ ee := e.e // don't dereference every time
+
+ if toMap {
+ ee.EncodeMapStart(newlen)
+ // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(kv.v)
+ } else {
+ ee.EncodeString(c_UTF8, kv.v)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(kv.r, nil)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else {
+ ee.EncodeArrayStart(newlen)
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encodeValue(kv.r, nil)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
+
+ // do not use defer. Instead, use explicit pool return at end of function.
+ // defer has a cost we are trying to avoid.
+ // If there is a panic and these slices are not returned, it is ok.
+ if pool != nil {
+ pool.Put(poolv)
+ }
+}
+
+// func (f *encFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// f.e.e.encodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem())
+// }
+
+// func (f *encFnInfo) kInterface(rv reflect.Value) {
+// println("kInterface called")
+// debug.PrintStack()
+// if rv.IsNil() {
+// f.e.e.EncodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem(), nil)
+// }
+
+func (f *encFnInfo) kMap(rv reflect.Value) {
+ ee := f.e.e
+ if rv.IsNil() {
+ ee.EncodeNil()
+ return
+ }
+
+ l := rv.Len()
+ ee.EncodeMapStart(l)
+ e := f.e
+ cr := e.cr
+ if l == 0 {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return
+ }
+ var asSymbols bool
+ // determine the underlying key and val encFn's for the map.
+ // This eliminates some work which is done for each loop iteration i.e.
+ // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
+ //
+ // However, if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var keyFn, valFn *encFn
+ ti := f.ti
+ rtkey := ti.rt.Key()
+ rtval := ti.rt.Elem()
+ rtkeyid := reflect.ValueOf(rtkey).Pointer()
+ // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
+ var keyTypeIsString = rtkeyid == stringTypId
+ if keyTypeIsString {
+ asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ } else {
+ for rtkey.Kind() == reflect.Ptr {
+ rtkey = rtkey.Elem()
+ }
+ if rtkey.Kind() != reflect.Interface {
+ rtkeyid = reflect.ValueOf(rtkey).Pointer()
+ keyFn = e.getEncFn(rtkeyid, rtkey, true, true)
+ }
+ }
+ for rtval.Kind() == reflect.Ptr {
+ rtval = rtval.Elem()
+ }
+ if rtval.Kind() != reflect.Interface {
+ rtvalid := reflect.ValueOf(rtval).Pointer()
+ valFn = e.getEncFn(rtvalid, rtval, true, true)
+ }
+ mks := rv.MapKeys()
+ // for j, lmks := 0, len(mks); j < lmks; j++ {
+
+ if e.h.Canonical {
+ e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols)
+ } else {
+ for j := range mks {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if keyTypeIsString {
+ if asSymbols {
+ ee.EncodeSymbol(mks[j].String())
+ } else {
+ ee.EncodeString(c_UTF8, mks[j].String())
+ }
+ } else {
+ e.encodeValue(mks[j], keyFn)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mks[j]), valFn)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) {
+ ee := e.e
+ cr := e.cr
+ // we previously did out-of-band if an extension was registered.
+ // This is not necessary, as the natural kind is sufficient for ordering.
+
+ if rtkeyid == uint8SliceTypId {
+ mksv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bytes()
+ }
+ sort.Sort(bytesRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeStringBytes(c_RAW, mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ } else {
+ switch rtkey.Kind() {
+ case reflect.Bool:
+ mksv := make([]boolRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bool()
+ }
+ sort.Sort(boolRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.String:
+ mksv := make([]stringRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.String()
+ }
+ sort.Sort(stringRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(mksv[i].v)
+ } else {
+ ee.EncodeString(c_UTF8, mksv[i].v)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
+ mksv := make([]uintRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Uint()
+ }
+ sort.Sort(uintRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ mksv := make([]intRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Int()
+ }
+ sort.Sort(intRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Float32:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(mksv[i].v))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ case reflect.Float64:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+ }
+ default:
+ // out-of-band
+ // first encode each key to a []byte, then sort them, then record
+ var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ mksbv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksbv[i]
+ l := len(mksv)
+ e2.MustEncode(k)
+ v.r = k
+ v.v = mksv[l:]
+ // fmt.Printf(">>>>> %s\n", mksv[l:])
+ }
+ sort.Sort(bytesRvSlice(mksbv))
+ for j := range mksbv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(mksbv[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encodeValue(rv.MapIndex(mksbv[j].r), valFn)
+ }
+ }
+ }
+}
+
+// --------------------------------------------------
+
+// encFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations once, and pass to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type encFn struct {
+ i encFnInfo
+ f func(*encFnInfo, reflect.Value)
+}
+
+// --------------------------------------------------
+
+type encRtidFn struct {
+ rtid uintptr
+ fn encFn
+}
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+ // hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+ e encDriver
+ // NOTE: Encoder shouldn't call its write methods,
+ // as the handler MAY need to do some coordination.
+ w encWriter
+ s []encRtidFn
+ ci set
+ be bool // is binary encoding
+ js bool // is json handle
+
+ wi ioEncWriter
+ wb bytesEncWriter
+
+ h *BasicHandle
+ hh Handle
+
+ cr containerStateRecv
+ as encDriverAsis
+
+ f map[uintptr]*encFn
+ b [scratchByteArrayLen]byte
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.Reset(w)
+ return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
+
+func newEncoder(h Handle) *Encoder {
+ e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ _, e.js = h.(*JsonHandle)
+ e.e = h.newEncDriver(e)
+ e.as, _ = e.e.(encDriverAsis)
+ e.cr, _ = e.e.(containerStateRecv)
+ return e
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates reusing the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ ww, ok := w.(ioEncWriterWriter)
+ if ok {
+ e.wi.w = ww
+ } else {
+ sww := &e.wi.s
+ sww.w = w
+ sww.bw, _ = w.(io.ByteWriter)
+ sww.sw, _ = w.(ioEncStringWriter)
+ e.wi.w = sww
+ //ww = bufio.NewWriterSize(w, defEncByteBufSize)
+ }
+ e.w = &e.wi
+ e.e.reset()
+}
+
+func (e *Encoder) ResetBytes(out *[]byte) {
+ in := *out
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ e.wb.b, e.wb.out, e.wb.c = in, out, 0
+ e.w = &e.wb
+ e.e.reset()
+}
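+
+// Reset and ResetBytes above allow a single Encoder to be reused across many
+// encode calls, so its cached sub-engine state is not rebuilt each time.
+// A sketch of that pattern (the items loop is illustrative):
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf, new(JsonHandle))
+//	for _, item := range items {
+//		buf.Reset()
+//		enc.Reset(&buf)
+//		if err := enc.Encode(item); err != nil {
+//			// handle the error
+//		}
+//	}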
+
+// func (e *Encoder) sendContainerState(c containerState) {
+// if e.cr != nil {
+// e.cr.sendContainerState(c)
+// }
+// }
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The "codec" key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+// Note that the "json" key is used in the absence of the "codec" key.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+// - the field is empty (see the empty values listed below) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline, except when:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+//      // NOTE: 'json:' can be used as the struct tag key, in place of 'codec:' below.
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// io.Reader //use key "Reader".
+//        MyStruct  `codec:"my1"`           //use key "my1".
+// MyStruct //inline it
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
+// //and encode struct as an array
+// }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If a Selfer, call its CodecEncodeSelf method
+// - If an extension is registered for it, call that extension function
+// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
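+//
+// A minimal end-to-end sketch (the handle and value are illustrative):
+//
+//	type MyStruct struct {
+//		Field2 int `codec:"myName"`
+//	}
+//	var out []byte
+//	err := NewEncoderBytes(&out, new(JsonHandle)).Encode(MyStruct{Field2: 7})
+//	// with the json handle, out should now contain {"myName":7}; err reports any failure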
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ e.encode(v)
+ e.w.atEndOfEncode()
+ return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight to the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+ e.encode(v)
+ e.w.atEndOfEncode()
+}
+
+// The (Must)Write methods below are commented out. They were only put there to support cbor.
+// However, users already have access to the streams, and can write directly.
+//
+// // Write allows users write to the Encoder stream directly.
+// func (e *Encoder) Write(bs []byte) (err error) {
+// defer panicToErr(&err)
+// e.w.writeb(bs)
+// return
+// }
+// // MustWrite is like write, but panics if unable to Write.
+// func (e *Encoder) MustWrite(bs []byte) {
+// e.w.writeb(bs)
+// }
+
+func (e *Encoder) encode(iv interface{}) {
+ // if ics, ok := iv.(Selfer); ok {
+ // ics.CodecEncodeSelf(e)
+ // return
+ // }
+
+ switch v := iv.(type) {
+ case nil:
+ e.e.EncodeNil()
+ case Selfer:
+ v.CodecEncodeSelf(e)
+
+ case reflect.Value:
+ e.encodeValue(v, nil)
+
+ case string:
+ e.e.EncodeString(c_UTF8, v)
+ case bool:
+ e.e.EncodeBool(v)
+ case int:
+ e.e.EncodeInt(int64(v))
+ case int8:
+ e.e.EncodeInt(int64(v))
+ case int16:
+ e.e.EncodeInt(int64(v))
+ case int32:
+ e.e.EncodeInt(int64(v))
+ case int64:
+ e.e.EncodeInt(v)
+ case uint:
+ e.e.EncodeUint(uint64(v))
+ case uint8:
+ e.e.EncodeUint(uint64(v))
+ case uint16:
+ e.e.EncodeUint(uint64(v))
+ case uint32:
+ e.e.EncodeUint(uint64(v))
+ case uint64:
+ e.e.EncodeUint(v)
+ case float32:
+ e.e.EncodeFloat32(v)
+ case float64:
+ e.e.EncodeFloat64(v)
+
+ case []uint8:
+ e.e.EncodeStringBytes(c_RAW, v)
+
+ case *string:
+ e.e.EncodeString(c_UTF8, *v)
+ case *bool:
+ e.e.EncodeBool(*v)
+ case *int:
+ e.e.EncodeInt(int64(*v))
+ case *int8:
+ e.e.EncodeInt(int64(*v))
+ case *int16:
+ e.e.EncodeInt(int64(*v))
+ case *int32:
+ e.e.EncodeInt(int64(*v))
+ case *int64:
+ e.e.EncodeInt(*v)
+ case *uint:
+ e.e.EncodeUint(uint64(*v))
+ case *uint8:
+ e.e.EncodeUint(uint64(*v))
+ case *uint16:
+ e.e.EncodeUint(uint64(*v))
+ case *uint32:
+ e.e.EncodeUint(uint64(*v))
+ case *uint64:
+ e.e.EncodeUint(*v)
+ case *float32:
+ e.e.EncodeFloat32(*v)
+ case *float64:
+ e.e.EncodeFloat64(*v)
+
+ case *[]uint8:
+ e.e.EncodeStringBytes(c_RAW, *v)
+
+ default:
+ const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer
+ if !fastpathEncodeTypeSwitch(iv, e) {
+ e.encodeI(iv, false, checkCodecSelfer1)
+ }
+ }
+}
+
+func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) {
+ // use a goto statement instead of a recursive function for ptr/interface.
+TOP:
+ switch rv.Kind() {
+ case reflect.Ptr:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
+ // TODO: Movable pointers will be an issue here. Future problem.
+ sptr = rv.UnsafeAddr()
+ break TOP
+ }
+ goto TOP
+ case reflect.Interface:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Slice, reflect.Map:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ case reflect.Invalid, reflect.Func:
+ e.e.EncodeNil()
+ return
+ }
+
+ proceed = true
+ rv2 = rv
+ return
+}
+
+func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr,
+ checkFastpath, checkCodecSelfer bool) {
+ if sptr != 0 {
+ if (&e.ci).add(sptr) {
+ e.errorf("circular reference found: # %d", sptr)
+ }
+ }
+ if fn == nil {
+ rt := rv.Type()
+ rtid := reflect.ValueOf(rt).Pointer()
+ // fn = e.getEncFn(rtid, rt, true, true)
+ fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
+ }
+ fn.f(&fn.i, rv)
+ if sptr != 0 {
+ (&e.ci).remove(sptr)
+ }
+}
+
+func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) {
+ if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed {
+ e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer)
+ }
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) {
+ // if a valid fn is passed, it MUST BE for the dereferenced type of rv
+ if rv, sptr, proceed := e.preEncodeValue(rv); proceed {
+ e.doEncodeValue(rv, fn, sptr, true, true)
+ }
+}
+
+func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) {
+ // rtid := reflect.ValueOf(rt).Pointer()
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = e.f[rtid]
+ } else {
+ for i := range e.s {
+ v := &(e.s[i])
+ if v.rtid == rtid {
+ fn, ok = &(v.fn), true
+ break
+ }
+ }
+ }
+ if ok {
+ return
+ }
+
+ if useMapForCodecCache {
+ if e.f == nil {
+ e.f = make(map[uintptr]*encFn, initCollectionCap)
+ }
+ fn = new(encFn)
+ e.f[rtid] = fn
+ } else {
+ if e.s == nil {
+ e.s = make([]encRtidFn, 0, initCollectionCap)
+ }
+ e.s = append(e.s, encRtidFn{rtid: rtid})
+ fn = &(e.s[len(e.s)-1]).fn
+ }
+
+ ti := e.h.getTypeInfo(rtid, rt)
+ fi := &(fn.i)
+ fi.e = e
+ fi.ti = ti
+
+ if checkCodecSelfer && ti.cs {
+ fn.f = (*encFnInfo).selferMarshal
+ } else if rtid == rawExtTypId {
+ fn.f = (*encFnInfo).rawExt
+ } else if e.e.IsBuiltinType(rtid) {
+ fn.f = (*encFnInfo).builtin
+ } else if xfFn := e.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.f = (*encFnInfo).ext
+ } else if supportMarshalInterfaces && e.be && ti.bm {
+ fn.f = (*encFnInfo).binaryMarshal
+ } else if supportMarshalInterfaces && !e.be && e.js && ti.jm {
+ // If JSON, we should check JSONMarshal before textMarshal
+ fn.f = (*encFnInfo).jsonMarshal
+ } else if supportMarshalInterfaces && !e.be && ti.tm {
+ fn.f = (*encFnInfo).textMarshal
+ } else {
+ rk := rt.Kind()
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if rt.PkgPath() == "" { // un-named slice or map
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.f = fastpathAV[idx].encfn
+ }
+ } else {
+ ok = false
+ // use the mapping for the underlying type, if one is registered
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(rt.Key(), rt.Elem())
+ } else {
+ rtu = reflect.SliceOf(rt.Elem())
+ }
+ rtuid := reflect.ValueOf(rtu).Pointer()
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].encfn
+ xrt := fastpathAV[idx].rt
+ fn.f = func(xf *encFnInfo, xrv reflect.Value) {
+ xfnf(xf, xrv.Convert(xrt))
+ }
+ }
+ }
+ }
+ if fn.f == nil {
+ switch rk {
+ case reflect.Bool:
+ fn.f = (*encFnInfo).kBool
+ case reflect.String:
+ fn.f = (*encFnInfo).kString
+ case reflect.Float64:
+ fn.f = (*encFnInfo).kFloat64
+ case reflect.Float32:
+ fn.f = (*encFnInfo).kFloat32
+ case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
+ fn.f = (*encFnInfo).kInt
+ case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr:
+ fn.f = (*encFnInfo).kUint
+ case reflect.Invalid:
+ fn.f = (*encFnInfo).kInvalid
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Struct:
+ fn.f = (*encFnInfo).kStruct
+ // reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+ // case reflect.Ptr:
+ // fn.f = (*encFnInfo).kPtr
+ // case reflect.Interface:
+ // fn.f = (*encFnInfo).kInterface
+ case reflect.Map:
+ fn.f = (*encFnInfo).kMap
+ default:
+ fn.f = (*encFnInfo).kErr
+ }
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+ if fnerr != nil {
+ panic(fnerr)
+ }
+ if bs == nil {
+ e.e.EncodeNil()
+ } else if asis {
+ e.asis(bs)
+ } else {
+ e.e.EncodeStringBytes(c, bs)
+ }
+}
+
+func (e *Encoder) asis(v []byte) {
+ if e.as == nil {
+ e.w.writeb(v)
+ } else {
+ e.as.EncodeAsis(v)
+ }
+}
+
+func (e *Encoder) errorf(format string, params ...interface{}) {
+ err := fmt.Errorf(format, params...)
+ panic(err)
+}
+
+// ----------------------------------------
+
+const encStructPoolLen = 5
+
+// encStructPool is an array of sync.Pool.
+// Each element of the array pools stringRv arrays of one size (8, 16, 32, 64 or 128),
+// allowing the re-use of slices up to 128 in length.
+// A significant cost of encoding structs was collecting
+// which values were empty and should be omitted.
+// We needed slices of reflect.Value and string to collect them.
+// This shared pool reduces the amount of unnecessary allocation we do.
+// The cost is that of occasional locking, but sync.Pool is efficient
+// enough to reduce thread contention.
+var encStructPool [encStructPoolLen]sync.Pool
+
+func init() {
+ encStructPool[0].New = func() interface{} { return new([8]stringRv) }
+ encStructPool[1].New = func() interface{} { return new([16]stringRv) }
+ encStructPool[2].New = func() interface{} { return new([32]stringRv) }
+ encStructPool[3].New = func() interface{} { return new([64]stringRv) }
+ encStructPool[4].New = func() interface{} { return new([128]stringRv) }
+}
+
+func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) {
+ // if encStructPoolLen != 5 { // constant check, so removed at build time.
+ // 	panic(errors.New("encStructPoolLen must be equal to 5")) // defensive, in case it is changed
+ // }
+ // idxpool := newlen / 8
+ if newlen <= 8 {
+ p = &encStructPool[0]
+ v = p.Get()
+ s = v.(*[8]stringRv)[:newlen]
+ } else if newlen <= 16 {
+ p = &encStructPool[1]
+ v = p.Get()
+ s = v.(*[16]stringRv)[:newlen]
+ } else if newlen <= 32 {
+ p = &encStructPool[2]
+ v = p.Get()
+ s = v.(*[32]stringRv)[:newlen]
+ } else if newlen <= 64 {
+ p = &encStructPool[3]
+ v = p.Get()
+ s = v.(*[64]stringRv)[:newlen]
+ } else if newlen <= 128 {
+ p = &encStructPool[4]
+ v = p.Get()
+ s = v.(*[128]stringRv)[:newlen]
+ } else {
+ s = make([]stringRv, newlen)
+ }
+ return
+}
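+
+// Callers of encStructPoolGet are expected to return the pooled array when they
+// are done with the slice. A sketch of the assumed pairing (names are illustrative;
+// the actual call sites live in the struct-encoding path):
+//
+//	pool, poolv, fkvs := encStructPoolGet(numFields)
+//	// ... fill and use fkvs ...
+//	if pool != nil {
+//		pool.Put(poolv)
+//	}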
+
+// ----------------------------------------
+
+// func encErr(format string, params ...interface{}) {
+// doPanic(msgTagEnc, format, params...)
+// }
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 0000000..cf6e00d
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,39365 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file,
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+//   - slices of all builtin types,
+//   - maps of all builtin types to string or interface value,
+//   - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+//    // decoding into p2 will panic if fast track functions do not treat m2 as unaddressable.
+//
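+// Based on the build constraint at the top of this file, building with the
+// corresponding tag should omit the fast paths entirely, e.g.:
+//
+//	go build -tags notfastpath
+//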
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathCheckNilFalse = false // for reflect
+const fastpathCheckNilTrue = true // for type switch
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, 271 // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < 271 && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
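+
+// The hand-rolled binary search above should behave like the following
+// standard-library form (a sketch only; not used by the generated code):
+//
+//	i := sort.Search(271, func(h int) bool { return x[h].rtid >= rtid })
+//	if i < 271 && x[i].rtid == rtid {
+//		return i
+//	}
+//	return -1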
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to a possible initialization loop error, populate the fastpath table in an init()
+func init() {
+ if !fastpathEnabled {
+ return
+ }
+ i := 0
+ fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := reflect.ValueOf(xrt).Pointer()
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+
+ fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR)
+ fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR)
+ fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R)
+ fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R)
+ fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR)
+ fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R)
+ fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R)
+ fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R)
+ fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR)
+ fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR)
+ fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R)
+ fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R)
+ fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R)
+ fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R)
+ fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR)
+
+ fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR)
+ fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR)
+ fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR)
+ fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R)
+ fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R)
+ fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R)
+ fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R)
+ fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR)
+ fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR)
+ fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R)
+ fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R)
+ fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R)
+ fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R)
+ fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R)
+ fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R)
+ fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR)
+ fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR)
+ fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR)
+ fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR)
+ fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R)
+ fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R)
+ fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R)
+ fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R)
+ fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR)
+ fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR)
+ fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R)
+ fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R)
+ fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R)
+ fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R)
+ fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R)
+ fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R)
+ fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR)
+ fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR)
+ fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR)
+ fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR)
+ fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R)
+ fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R)
+ fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R)
+ fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, (*decFnInfo).fastpathDecMapFloat32Uint64R)
+ fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR)
+ fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR)
+ fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R)
+ fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R)
+ fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R)
+ fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R)
+ fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R)
+ fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R)
+ fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR)
+ fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR)
+ fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR)
+ fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR)
+ fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R)
+ fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R)
+ fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R)
+ fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R)
+ fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR)
+ fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR)
+ fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R)
+ fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R)
+ fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R)
+ fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R)
+ fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R)
+ fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R)
+ fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR)
+ fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR)
+ fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR)
+ fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR)
+ fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R)
+ fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R)
+ fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R)
+ fn(map[uint]uint64(nil), (*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R)
+ fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR)
+ fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR)
+ fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R)
+ fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R)
+ fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R)
+ fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R)
+ fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R)
+ fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R)
+ fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR)
+ fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR)
+ fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR)
+ fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR)
+ fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R)
+ fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R)
+ fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R)
+ fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R)
+ fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR)
+ fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR)
+ fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R)
+ fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R)
+ fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R)
+ fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R)
+ fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R)
+ fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R)
+ fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR)
+ fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR)
+ fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR)
+ fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR)
+ fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R)
+ fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R)
+ fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R)
+ fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R)
+ fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR)
+ fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR)
+ fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R)
+ fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R)
+ fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R)
+ fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R)
+ fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R)
+ fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R)
+ fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR)
+ fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR)
+ fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR)
+ fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR)
+ fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R)
+ fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R)
+ fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R)
+ fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R)
+ fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR)
+ fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR)
+ fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R)
+ fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R)
+ fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R)
+ fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R)
+ fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R)
+ fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R)
+ fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR)
+ fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR)
+ fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR)
+ fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR)
+ fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R)
+ fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R)
+ fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R)
+ fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, (*decFnInfo).fastpathDecMapUint64Uint64R)
+ fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR)
+ fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR)
+ fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R)
+ fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R)
+ fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R)
+ fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R)
+ fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R)
+ fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R)
+ fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR)
+ fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR)
+ fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR)
+ fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR)
+ fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R)
+ fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R)
+ fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R)
+ fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R)
+ fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR)
+ fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR)
+ fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R)
+ fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R)
+ fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R)
+ fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R)
+ fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R)
+ fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R)
+ fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR)
+ fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR)
+ fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR)
+ fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR)
+ fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R)
+ fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R)
+ fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R)
+ fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, (*decFnInfo).fastpathDecMapIntUint64R)
+ fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR)
+ fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR)
+ fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R)
+ fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R)
+ fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R)
+ fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R)
+ fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R)
+ fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R)
+ fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR)
+ fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR)
+ fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR)
+ fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR)
+ fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R)
+ fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R)
+ fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R)
+ fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R)
+ fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR)
+ fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR)
+ fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R)
+ fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R)
+ fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R)
+ fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R)
+ fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R)
+ fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R)
+ fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR)
+ fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR)
+ fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR)
+ fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR)
+ fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R)
+ fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R)
+ fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R)
+ fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R)
+ fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR)
+ fn(map[int16]int(nil), (*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR)
+ fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R)
+ fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R)
+ fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R)
+ fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R)
+ fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R)
+ fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R)
+ fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR)
+ fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR)
+ fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR)
+ fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR)
+ fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R)
+ fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R)
+ fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R)
+ fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R)
+ fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR)
+ fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR)
+ fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R)
+ fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R)
+ fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R)
+ fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R)
+ fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R)
+ fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R)
+ fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR)
+ fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR)
+ fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR)
+ fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR)
+ fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R)
+ fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R)
+ fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R)
+ fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R)
+ fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR)
+ fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR)
+ fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R)
+ fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R)
+ fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R)
+ fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R)
+ fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R)
+ fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R)
+ fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR)
+ fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR)
+ fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR)
+ fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR)
+ fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R)
+ fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R)
+ fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R)
+ fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R)
+ fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR)
+ fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR)
+ fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R)
+ fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R)
+ fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R)
+ fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R)
+ fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R)
+ fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R)
+ fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR)
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []string:
+ fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint:
+ fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int:
+ fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
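+// fastpathEncodeTypeSwitchSlice is the slice-only analogue of fastpathEncodeTypeSwitch:
+// it dispatches the supported []T types (and pointers to them) to their generated
+// encoders, returning true if iv was handled and false so the caller can fall back
+// to the generic encode path.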
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+ case []string:
+ fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case []uint:
+ fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case []int:
+ fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
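+// fastpathEncodeTypeSwitchMap is the map-only analogue of fastpathEncodeTypeSwitch:
+// it dispatches the supported map[K]V types (and pointers to them) to their generated
+// encoders, returning true if iv was handled and false so the caller can fall back
+// to the generic encode path.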
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
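+
+// A false return from the type switch above means the concrete (or pointed-to)
+// type has no generated fast path; the value is presumably then handled by the
+// generic, reflection-based encode path.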
+
+// -- -- fast path functions
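+//
+// Each fastpathEnc*R method below type-asserts the concrete slice or map out
+// of the reflect.Value and hands it to the matching Enc*V helper, so the
+// per-element loop does no further reflection. Where e.cr is non-nil, the
+// helpers also signal array/map element boundaries via sendContainerState.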
+
+func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
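+// EncAsMapSliceIntfV writes a []interface{} as a map: elements at even indices
+// become keys and the elements that follow them become the corresponding
+// values, which is why an odd slice length is rejected. This variant is chosen
+// when the type is flagged mapBySlice (f.ti.mbs in the R methods above).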
+func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeFloat32(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeFloat64(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ e.encode(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
+ ee.EncodeBool(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+}
+
+func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ ee.EncodeBool(v2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
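+// The map encoders below optionally write keys in a canonical order. With
+// e.h.Canonical set, each key is first encoded into the scratch buffer mksv,
+// the keys are sorted by their encoded form, and they are then emitted
+// verbatim via e.asis, each followed by its value. Otherwise keys and values
+// are written in Go's map iteration order, which is not deterministic.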
+func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
+ fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) {
+ fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) {
+ fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) {
+ fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) {
+ fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) {
+ fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
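+
+// Note: for string-keyed maps the canonical path is simpler than the
+// interface{}-keyed one above: keys are copied into a []string, sorted with
+// stringSlice, and looked up again while emitting values. The asSymbols flag,
+// derived from the handle's AsSymbols option and AsSymbolMapStringKeysFlag,
+// selects EncodeSymbol versus EncodeString(c_UTF8, ...) for each key.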
+
+func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) {
+ fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) {
+ fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) {
+ fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) {
+ fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) {
+ fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) {
+ fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[string(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
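+
+// Note: numeric-keyed maps sort by widening the key to a common type. Here
+// float32 keys are collected as float64, ordered with floatSlice, then
+// converted back to float32 both when encoding the key and when indexing the
+// map for its value; the non-canonical branch encodes the key directly.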
+
+func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[float32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[float64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
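+// Descriptive note: every EncMap*V helper below follows the same shape: write the
+// map header with EncodeMapStart, then either walk the keys in sorted order (when
+// the handle's Canonical option is set, via a scratch slice of widened keys) or
+// range over the map directly, wrapping each key and value in the optional
+// container-state callbacks, and finally signal containerMapEnd.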
+func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) {
+ fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) {
+ fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) {
+ fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) {
+ fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) {
+ fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) {
+ fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) {
+ fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
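+// Descriptive note: for the narrower unsigned key types (uint8, uint16, uint32) the
+// canonical path still sorts a []uint64 scratch slice; each key is widened with
+// uint64(k) for ordering and narrowed back (e.g. uint8(k2)) when the map is indexed.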
+func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) {
+ fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) {
+ fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) {
+ fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uint64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) {
+ fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[uintptr(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) {
+ fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) {
+ fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) {
+ fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) {
+ fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) {
+ fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) {
+ fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) {
+ fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
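(Editor's note, not part of the vendored file: every generated fastpath map encoder in this hunk follows the same shape — in canonical mode it copies the keys into an []int64, sorts them, then re-casts each sorted key back to the map's key type to look up and emit the value between containerMapKey/containerMapValue states; otherwise it ranges the map directly. As a minimal standalone sketch of that canonical-ordering step, using only standard-library calls and a hypothetical map[int8]string input, not the codec's own Encoder API:)

// canonical_sketch.go — illustrative only; mirrors the sorted-key pattern of
// the generated EncMap*V functions above without the codec machinery.
package main

import (
	"fmt"
	"sort"
)

func encodeCanonical(v map[int8]string) {
	// Collect keys widened to int64, as the generated code does with intSlice.
	keys := make([]int64, 0, len(v))
	for k := range v {
		keys = append(keys, int64(k))
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	for _, k := range keys {
		// Emit key, then value — the analogue of the containerMapKey /
		// containerMapValue sends followed by EncodeInt / EncodeString.
		fmt.Println(k, v[int8(k)])
	}
}

func main() {
	encodeCanonical(map[int8]string{3: "c", 1: "a", 2: "b"})
}

(The per-type casts such as int8(k2) or int16(k2) in the generated functions exist only to index the original map after sorting through the shared int64 slice.)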
+func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int8(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int16(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int32(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) {
+ fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) {
+ fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) {
+ fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) {
+ fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) {
+ fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) {
+ fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) {
+ fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[int64(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) {
+ fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) {
+ fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeString(c_UTF8, v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) {
+ fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) {
+ fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ e.encode(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) {
+ fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat32(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeFloat64(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) {
+ fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v[bool(k2)])
+ }
+ } else {
+ for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ ee.EncodeBool(v2)
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+// -- decode
+
+// -- -- fast path type switch
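+// fastpathDecodeTypeSwitch dispatches decoding for the supported concrete slice and
+// map types via a type switch, avoiding the reflection-based path. Value cases decode
+// into the container as given; pointer cases decode through the pointed-to container
+// and assign the returned value back only when the Dec*V helper reports a change
+// (for instance, when a slice had to be re-allocated to grow).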
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d)
+ case *[]interface{}:
+ v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]interface{}:
+ fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]interface{}:
+ v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]string:
+ fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]string:
+ v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint:
+ fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint:
+ v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint8:
+ fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint8:
+ v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint16:
+ fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint16:
+ v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint32:
+ fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint32:
+ v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uint64:
+ fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uint64:
+ v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]uintptr:
+ fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]uintptr:
+ v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int:
+ fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int:
+ v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int8:
+ fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int8:
+ v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int16:
+ fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int16:
+ v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int32:
+ fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int32:
+ v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]int64:
+ fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]int64:
+ v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]float32:
+ fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]float32:
+ v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]float64:
+ fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]float64:
+ v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[interface{}]bool:
+ fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[interface{}]bool:
+ v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []string:
+ fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d)
+ case *[]string:
+ v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]interface{}:
+ fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]interface{}:
+ v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]string:
+ fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]string:
+ v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint:
+ fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint:
+ v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint8:
+ fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint8:
+ v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint16:
+ fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint16:
+ v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint32:
+ fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint32:
+ v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uint64:
+ fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uint64:
+ v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]uintptr:
+ fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]uintptr:
+ v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int:
+ fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int:
+ v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int8:
+ fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int8:
+ v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int16:
+ fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int16:
+ v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int32:
+ fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int32:
+ v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]int64:
+ fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]int64:
+ v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]float32:
+ fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]float32:
+ v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]float64:
+ fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[string]float64:
+ v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[string]bool:
+ fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[string]bool:
+ v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []float32:
+ fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *[]float32:
+ v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]interface{}:
+ fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]interface{}:
+ v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]string:
+ fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]string:
+ v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint:
+ fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint:
+ v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint8:
+ fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint8:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint16:
+ fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint16:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint32:
+ fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint32:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uint64:
+ fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uint64:
+ v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]uintptr:
+ fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]uintptr:
+ v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int:
+ fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int:
+ v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int8:
+ fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int8:
+ v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int16:
+ fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int16:
+ v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int32:
+ fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int32:
+ v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]int64:
+ fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]int64:
+ v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]float32:
+ fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]float32:
+ v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]float64:
+ fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]float64:
+ v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float32]bool:
+ fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[float32]bool:
+ v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []float64:
+ fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *[]float64:
+ v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]interface{}:
+ fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]interface{}:
+ v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]string:
+ fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]string:
+ v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint:
+ fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint:
+ v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint8:
+ fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint8:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint16:
+ fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint16:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint32:
+ fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint32:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uint64:
+ fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uint64:
+ v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]uintptr:
+ fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]uintptr:
+ v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int:
+ fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int:
+ v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int8:
+ fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int8:
+ v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int16:
+ fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int16:
+ v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int32:
+ fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int32:
+ v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]int64:
+ fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]int64:
+ v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]float32:
+ fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]float32:
+ v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]float64:
+ fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]float64:
+ v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[float64]bool:
+ fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[float64]bool:
+ v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint:
+ fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d)
+ case *[]uint:
+ v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]interface{}:
+ fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]interface{}:
+ v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]string:
+ fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]string:
+ v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint:
+ fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint:
+ v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint8:
+ fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint8:
+ v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint16:
+ fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint16:
+ v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint32:
+ fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint32:
+ v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uint64:
+ fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uint64:
+ v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]uintptr:
+ fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]uintptr:
+ v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int:
+ fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int:
+ v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int8:
+ fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int8:
+ v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int16:
+ fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int16:
+ v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int32:
+ fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int32:
+ v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]int64:
+ fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]int64:
+ v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]float32:
+ fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]float32:
+ v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]float64:
+ fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]float64:
+ v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint]bool:
+ fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint]bool:
+ v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]interface{}:
+ fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]string:
+ fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]string:
+ v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint:
+ fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint:
+ v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint8:
+ fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint8:
+ v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint16:
+ fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint16:
+ v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint32:
+ fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint32:
+ v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uint64:
+ fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uint64:
+ v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]uintptr:
+ fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int:
+ fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int:
+ v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int8:
+ fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int8:
+ v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int16:
+ fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int16:
+ v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int32:
+ fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int32:
+ v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]int64:
+ fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]int64:
+ v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]float32:
+ fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]float32:
+ v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]float64:
+ fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]float64:
+ v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint8]bool:
+ fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint8]bool:
+ v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint16:
+ fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint16:
+ v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]interface{}:
+ fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]string:
+ fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]string:
+ v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint:
+ fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint:
+ v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint8:
+ fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint8:
+ v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint16:
+ fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint16:
+ v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint32:
+ fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint32:
+ v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uint64:
+ fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uint64:
+ v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]uintptr:
+ fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int:
+ fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int:
+ v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int8:
+ fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int8:
+ v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int16:
+ fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int16:
+ v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int32:
+ fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int32:
+ v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]int64:
+ fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]int64:
+ v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]float32:
+ fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]float32:
+ v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]float64:
+ fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]float64:
+ v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint16]bool:
+ fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint16]bool:
+ v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint32:
+ fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint32:
+ v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]interface{}:
+ fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]string:
+ fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]string:
+ v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint:
+ fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint:
+ v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint8:
+ fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint8:
+ v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint16:
+ fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint16:
+ v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint32:
+ fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint32:
+ v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uint64:
+ fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uint64:
+ v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]uintptr:
+ fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int:
+ fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int:
+ v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int8:
+ fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int8:
+ v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int16:
+ fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int16:
+ v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int32:
+ fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int32:
+ v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]int64:
+ fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]int64:
+ v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]float32:
+ fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]float32:
+ v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]float64:
+ fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]float64:
+ v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint32]bool:
+ fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint32]bool:
+ v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uint64:
+ fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d)
+ case *[]uint64:
+ v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]interface{}:
+ fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]interface{}:
+ v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]string:
+ fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]string:
+ v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint:
+ fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint:
+ v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint8:
+ fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint8:
+ v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint16:
+ fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint16:
+ v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint32:
+ fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint32:
+ v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uint64:
+ fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uint64:
+ v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]uintptr:
+ fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]uintptr:
+ v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int:
+ fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int:
+ v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int8:
+ fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int8:
+ v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int16:
+ fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int16:
+ v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int32:
+ fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int32:
+ v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]int64:
+ fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]int64:
+ v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]float32:
+ fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]float32:
+ v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]float64:
+ fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]float64:
+ v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uint64]bool:
+ fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uint64]bool:
+ v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []uintptr:
+ fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *[]uintptr:
+ v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]interface{}:
+ fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]interface{}:
+ v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]string:
+ fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]string:
+ v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint:
+ fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint:
+ v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint8:
+ fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint8:
+ v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint16:
+ fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint16:
+ v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint32:
+ fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint32:
+ v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uint64:
+ fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uint64:
+ v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]uintptr:
+ fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]uintptr:
+ v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int:
+ fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int:
+ v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int8:
+ fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int8:
+ v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int16:
+ fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int16:
+ v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int32:
+ fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int32:
+ v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]int64:
+ fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]int64:
+ v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]float32:
+ fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]float32:
+ v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]float64:
+ fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]float64:
+ v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[uintptr]bool:
+ fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[uintptr]bool:
+ v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int:
+ fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d)
+ case *[]int:
+ v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]interface{}:
+ fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]interface{}:
+ v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]string:
+ fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]string:
+ v2, changed2 := fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint:
+ fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint:
+ v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint8:
+ fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint8:
+ v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint16:
+ fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint16:
+ v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint32:
+ fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint32:
+ v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uint64:
+ fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uint64:
+ v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]uintptr:
+ fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]uintptr:
+ v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int:
+ fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int:
+ v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int8:
+ fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int8:
+ v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int16:
+ fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int16:
+ v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int32:
+ fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int32:
+ v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]int64:
+ fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]int64:
+ v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]float32:
+ fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]float32:
+ v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]float64:
+ fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int]float64:
+ v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int]bool:
+ fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int]bool:
+ v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int8:
+ fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d)
+ case *[]int8:
+ v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]interface{}:
+ fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]string:
+ fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]string:
+ v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint:
+ fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint:
+ v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint8:
+ fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint8:
+ v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint16:
+ fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint16:
+ v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint32:
+ fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint32:
+ v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uint64:
+ fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uint64:
+ v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]uintptr:
+ fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int:
+ fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int:
+ v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int8:
+ fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int8:
+ v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int16:
+ fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int16:
+ v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int32:
+ fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int32:
+ v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]int64:
+ fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]int64:
+ v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]float32:
+ fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]float32:
+ v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]float64:
+ fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]float64:
+ v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int8]bool:
+ fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int8]bool:
+ v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int16:
+ fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d)
+ case *[]int16:
+ v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]interface{}:
+ fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]string:
+ fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]string:
+ v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint:
+ fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint:
+ v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint8:
+ fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint8:
+ v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint16:
+ fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint16:
+ v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint32:
+ fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint32:
+ v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uint64:
+ fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uint64:
+ v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]uintptr:
+ fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int:
+ fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int:
+ v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int8:
+ fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int8:
+ v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int16:
+ fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int16:
+ v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int32:
+ fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int32:
+ v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]int64:
+ fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]int64:
+ v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]float32:
+ fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]float32:
+ v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]float64:
+ fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]float64:
+ v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int16]bool:
+ fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int16]bool:
+ v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int32:
+ fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d)
+ case *[]int32:
+ v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]interface{}:
+ fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]string:
+ fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]string:
+ v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint:
+ fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint:
+ v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint8:
+ fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint8:
+ v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint16:
+ fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint16:
+ v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint32:
+ fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint32:
+ v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uint64:
+ fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uint64:
+ v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]uintptr:
+ fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int:
+ fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int:
+ v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int8:
+ fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int8:
+ v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int16:
+ fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int16:
+ v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int32:
+ fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int32:
+ v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]int64:
+ fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]int64:
+ v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]float32:
+ fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]float32:
+ v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]float64:
+ fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]float64:
+ v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int32]bool:
+ fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int32]bool:
+ v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []int64:
+ fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d)
+ case *[]int64:
+ v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]interface{}:
+ fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]interface{}:
+ v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]string:
+ fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]string:
+ v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint:
+ fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint:
+ v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint8:
+ fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint8:
+ v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint16:
+ fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint16:
+ v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint32:
+ fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint32:
+ v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uint64:
+ fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uint64:
+ v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]uintptr:
+ fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]uintptr:
+ v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int:
+ fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int:
+ v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int8:
+ fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int8:
+ v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int16:
+ fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int16:
+ v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int32:
+ fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int32:
+ v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]int64:
+ fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]int64:
+ v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]float32:
+ fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]float32:
+ v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]float64:
+ fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]float64:
+ v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[int64]bool:
+ fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[int64]bool:
+ v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case []bool:
+ fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d)
+ case *[]bool:
+ v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]interface{}:
+ fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]interface{}:
+ v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]string:
+ fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]string:
+ v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint:
+ fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint:
+ v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint8:
+ fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint8:
+ v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint16:
+ fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint16:
+ v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint32:
+ fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint32:
+ v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uint64:
+ fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uint64:
+ v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]uintptr:
+ fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]uintptr:
+ v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int:
+ fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int:
+ v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int8:
+ fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int8:
+ v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int16:
+ fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int16:
+ v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int32:
+ fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int32:
+ v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]int64:
+ fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]int64:
+ v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]float32:
+ fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]float32:
+ v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]float64:
+ fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]float64:
+ v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ case map[bool]bool:
+ fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d)
+ case *map[bool]bool:
+ v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
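+//
+// Orientation note (the behaviour is defined entirely by the generated code below):
+// each fast-path type is served by a trio of helpers with a common shape, the slice
+// forms of which appear immediately below --
+//   fastpathDec<T>R(rv)                 decFnInfo method taking a reflect.Value; when the
+//                                       value is addressable (and not a fixed-length array)
+//                                       it decodes through the pointer so a re-allocated
+//                                       container can be written back, otherwise it decodes
+//                                       in place with canChange=false
+//   Dec<T>X(vp, checkNil, d)            pointer variant; calls the V form with canChange=true
+//                                       and stores the result back only if it changed
+//   Dec<T>V(v, checkNil, canChange, d)  value variant returning (newV, changed); checkNil
+//                                       short-circuits on an encoded nil, and canChange says
+//                                       whether the container may be grown, truncated or
+//                                       re-allocated while decoding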
+
+func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]interface{})
+ v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]interface{})
+ fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
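+// DecSliceIntfV (like the other generated Dec*V slice helpers) handles two container
+// encodings, as the code shows: a known length (containerLenS > 0), where decInferLen
+// caps the initial allocation based on d.h.MaxInitLen and any truncated remainder is
+// appended element by element, and an indefinite length, where elements are read until
+// CheckBreak reports the terminator. In both paths, elements that do not fit a
+// non-resizable destination (canChange=false) are reported via d.arrayCannotExpand and
+// skipped with d.swallow().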
+func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ d.decode(&v[j])
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, nil)
+ slh.ElemContainerState(j)
+ d.decode(&v[j])
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]interface{}, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, nil)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ d.decode(&v[j])
+
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
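+
+// The generated slice helpers that follow repeat the structure of DecSliceIntfV; only the
+// element decode call differs (dd.DecodeString for []string, dd.DecodeFloat for the float
+// kinds, dd.DecodeUint/DecodeInt at the relevant bit size for the integer kinds), together
+// with the per-element size hint passed to decInferLen.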
+
+func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]string)
+ v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]string)
+ fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]string, xlen)
+ }
+ } else {
+ v = make([]string, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeString()
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, "")
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeString()
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]string, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, "")
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeString()
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]float32)
+ v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]float32)
+ fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float32, xlen)
+ }
+ } else {
+ v = make([]float32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = float32(dd.DecodeFloat(true))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = float32(dd.DecodeFloat(true))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]float32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = float32(dd.DecodeFloat(true))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]float64)
+ v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]float64)
+ fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float64, xlen)
+ }
+ } else {
+ v = make([]float64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeFloat(false)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeFloat(false)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]float64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeFloat(false)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint)
+ v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint)
+ fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint, xlen)
+ }
+ } else {
+ v = make([]uint, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint(dd.DecodeUint(uintBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint16)
+ v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint16)
+ fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint16, xlen)
+ }
+ } else {
+ v = make([]uint16, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint16(dd.DecodeUint(16))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint16(dd.DecodeUint(16))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint16, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint16(dd.DecodeUint(16))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint32)
+ v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint32)
+ fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint32, xlen)
+ }
+ } else {
+ v = make([]uint32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uint32(dd.DecodeUint(32))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uint32(dd.DecodeUint(32))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uint32(dd.DecodeUint(32))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uint64)
+ v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uint64)
+ fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint64, xlen)
+ }
+ } else {
+ v = make([]uint64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeUint(64)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeUint(64)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uint64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeUint(64)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]uintptr)
+ v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]uintptr)
+ fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]uintptr, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = uintptr(dd.DecodeUint(uintBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int)
+ v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int)
+ fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int, xlen)
+ }
+ } else {
+ v = make([]int, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int(dd.DecodeInt(intBitsize))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int(dd.DecodeInt(intBitsize))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int(dd.DecodeInt(intBitsize))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int8)
+ v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int8)
+ fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int8, xlen)
+ }
+ } else {
+ v = make([]int8, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int8(dd.DecodeInt(8))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int8(dd.DecodeInt(8))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int8, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int8(dd.DecodeInt(8))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int16)
+ v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int16)
+ fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int16, xlen)
+ }
+ } else {
+ v = make([]int16, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int16(dd.DecodeInt(16))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int16(dd.DecodeInt(16))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int16, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int16(dd.DecodeInt(16))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int32)
+ v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int32)
+ fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int32, xlen)
+ }
+ } else {
+ v = make([]int32, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = int32(dd.DecodeInt(32))
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = int32(dd.DecodeInt(32))
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int32, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = int32(dd.DecodeInt(32))
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]int64)
+ v, changed := fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]int64)
+ fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int64, xlen)
+ }
+ } else {
+ v = make([]int64, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeInt(64)
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, 0)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeInt(64)
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]int64, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeInt(64)
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() {
+ vp := rv.Addr().Interface().(*[]bool)
+ v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]bool)
+ fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecSliceBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
+ dd := d.d
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange {
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]bool, xlen)
+ }
+ } else {
+ v = make([]bool, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeBool()
+ }
+ if xtrunc {
+ for ; j < containerLenS; j++ {
+ v = append(v, false)
+ slh.ElemContainerState(j)
+ v[j] = dd.DecodeBool()
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]bool, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, false)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) {
+ v[j] = dd.DecodeBool()
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
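+// The map fast paths below follow the same R/X/V convention as the slices. The
+// ...V workers allocate a new map sized via decInferLen when canChange is true
+// and the map is nil, convert []byte keys decoded as interface{} to string via
+// d.string so they are usable as map keys, and handle both fixed-length and
+// break-terminated map encodings. Only the variants with interface{} values
+// consult MapValueReset/InterfaceReset (mapGet) to decide whether to decode
+// into the existing entry rather than a fresh value.
+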
+func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]interface{})
+ v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]interface{})
+ fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk interface{}
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]string)
+ v, changed := fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]string)
+ fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]string, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint)
+ v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint)
+ fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint8)
+ v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint8)
+ fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]uint8, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint16)
+ v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint16)
+ fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]uint16, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint32)
+ v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint32)
+ fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]uint32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uint64)
+ v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uint64)
+ fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]uintptr)
+ v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]uintptr)
+ fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uintptr, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int)
+ v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int)
+ fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int8)
+ v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int8)
+ fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]int8, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int16)
+ v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int16)
+ fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]int16, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int32)
+ v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int32)
+ fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]int32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]int64)
+ v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]int64)
+ fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]float32)
+ v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]float32)
+ fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]float32, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]float64)
+ v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]float64)
+ fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]float64, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[interface{}]bool)
+ v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[interface{}]bool)
+ fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[interface{}]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]bool, xlen)
+ changed = true
+ }
+
+ var mk interface{}
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
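+// The string-keyed variants below differ from the interface{}-keyed ones only
+// in how the key is read: they call dd.DecodeString() directly instead of
+// decoding into an interface{} and converting a []byte key.
+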
+func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]interface{})
+ v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]interface{})
+ fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk string
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]string)
+ v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]string)
+ fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]string, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint)
+ v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint)
+ fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint8)
+ v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint8)
+ fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]uint8, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint16)
+ v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint16)
+ fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]uint16, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint32)
+ v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint32)
+ fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]uint32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uint64)
+ v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uint64)
+ fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]uintptr)
+ v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]uintptr)
+ fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uintptr, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int)
+ v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int)
+ fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int8)
+ v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int8)
+ fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]int8, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int16)
+ v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int16)
+ fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]int16, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int32)
+ v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int32)
+ fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]int32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]int64)
+ v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]int64)
+ fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]float32)
+ v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]float32)
+ fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]float32, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]float64)
+ v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]float64)
+ fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]float64, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[string]bool)
+ v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[string]bool)
+ fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[string]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]bool, xlen)
+ changed = true
+ }
+
+ var mk string
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]interface{})
+ v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]interface{})
+ fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]string)
+ v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]string)
+ fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]string, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint)
+ v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint)
+ fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint8)
+ v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint8)
+ fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]uint8, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint16)
+ v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint16)
+ fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]uint16, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint32)
+ v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint32)
+ fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]uint32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uint64)
+ v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uint64)
+ fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]uintptr)
+ v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]uintptr)
+ fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int)
+ v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int)
+ fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int8)
+ v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int8)
+ fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]int8, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int16)
+ v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int16)
+ fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]int16, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int32)
+ v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int32)
+ fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]int32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]int64)
+ v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]int64)
+ fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]float32)
+ v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]float32)
+ fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]float32, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]float64)
+ v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]float64)
+ fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]float64, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float32]bool)
+ v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float32]bool)
+ fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]bool, xlen)
+ changed = true
+ }
+
+ var mk float32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]interface{})
+ v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]interface{})
+ fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]string)
+ v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]string)
+ fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]string, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint)
+ v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint)
+ fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint8)
+ v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint8)
+ fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]uint8, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint16)
+ v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint16)
+ fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]uint16, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint32)
+ v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint32)
+ fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]uint32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uint64)
+ v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uint64)
+ fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]uintptr)
+ v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]uintptr)
+ fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int)
+ v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int)
+ fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int8)
+ v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int8)
+ fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]int8, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int16)
+ v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int16)
+ fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]int16, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int32)
+ v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int32)
+ fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]int32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]int64)
+ v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]int64)
+ fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]float32)
+ v, changed := fastpathTV.DecMapFloat64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]float32)
+ fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]float32, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]float64)
+ v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]float64)
+ fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]float64, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[float64]bool)
+ v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[float64]bool)
+ fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[float64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]bool, xlen)
+ changed = true
+ }
+
+ var mk float64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
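+// A minimal sketch of how these helpers are reached from a client package
+// (handle choice and variable names are illustrative):
+//
+//	var m map[uint]string
+//	dec := codec.NewDecoderBytes(data, new(codec.MsgpackHandle))
+//	if err := dec.Decode(&m); err != nil {
+//		// handle the decode error
+//	}
+//
+// When the destination is one of the generated map or slice types, Decode
+// dispatches to the matching fastpathDec...R function rather than the
+// generic reflection-based container decoder.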
+func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]interface{})
+ v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]interface{})
+ fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]interface{}, xlen)
+ changed = true
+ }
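+ // The interface{}-valued maps reuse the existing entry as the decode target:
+ // unless the handle's MapValueReset/InterfaceReset options request a fresh
+ // value, the current v[mk] is handed to d.decode so it can be updated in place.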
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]string)
+ v, changed := fastpathTV.DecMapUintStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]string)
+ fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]string, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint)
+ v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint)
+ fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint8)
+ v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint8)
+ fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint16)
+ v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint16)
+ fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint32)
+ v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint32)
+ fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uint64)
+ v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uint64)
+ fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]uintptr)
+ v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]uintptr)
+ fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int)
+ v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int)
+ fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int8)
+ v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int8)
+ fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]int8, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int16)
+ v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int16)
+ fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]int16, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int32)
+ v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int32)
+ fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]int32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]int64)
+ v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]int64)
+ fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]float32)
+ v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]float32)
+ fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]float32, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]float64)
+ v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]float64)
+ fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]float64, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint]bool)
+ v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint]bool)
+ fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]bool, xlen)
+ changed = true
+ }
+
+ var mk uint
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]interface{})
+ v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]interface{})
+ fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint8
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]string)
+ v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]string)
+ fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]string, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint)
+ v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint)
+ fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint8)
+ v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint8)
+ fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint16)
+ v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint16)
+ fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint32)
+ v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint32)
+ fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uint64)
+ v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uint64)
+ fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]uintptr)
+ v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]uintptr)
+ fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int)
+ v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int)
+ fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int8)
+ v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int8)
+ fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]int8, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int16)
+ v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int16)
+ fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]int16, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int32)
+ v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int32)
+ fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]int32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]int64)
+ v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]int64)
+ fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]float32)
+ v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]float32)
+ fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]float32, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]float64)
+ v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]float64)
+ fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]float64, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint8]bool)
+ v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint8]bool)
+ fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint8]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]bool, xlen)
+ changed = true
+ }
+
+ var mk uint8
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
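+// The uint16-keyed map decoders below repeat the same R/X/V triple for each
+// supported value type; only the key width read (DecodeUint(16)) and the
+// per-entry size hint passed to decInferLen change.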
+func (f *decFnInfo) fastpathDecMapUint16IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]interface{})
+ v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]interface{})
+ fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint16
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]string)
+ v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]string)
+ fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]string, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint)
+ v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint)
+ fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint8)
+ v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint8)
+ fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint16)
+ v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint16)
+ fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint32)
+ v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint32)
+ fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uint64)
+ v, changed := fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uint64)
+ fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]uintptr)
+ v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]uintptr)
+ fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int)
+ v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int)
+ fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int8)
+ v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int8)
+ fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]int8, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int16)
+ v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int16)
+ fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]int16, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int32)
+ v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int32)
+ fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]int32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]int64)
+ v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]int64)
+ fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]float32)
+ v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]float32)
+ fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]float32, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]float64)
+ v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]float64)
+ fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]float64, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint16]bool)
+ v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint16]bool)
+ fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint16]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]bool, xlen)
+ changed = true
+ }
+
+ var mk uint16
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]interface{})
+ v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]interface{})
+ fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]string)
+ v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]string)
+ fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]string, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint)
+ v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint)
+ fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint8)
+ v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint8)
+ fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint16)
+ v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint16)
+ fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint32)
+ v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint32)
+ fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uint64)
+ v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uint64)
+ fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]uintptr)
+ v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]uintptr)
+ fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int)
+ v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int)
+ fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int8)
+ v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int8)
+ fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]int8, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int16)
+ v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int16)
+ fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]int16, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int32)
+ v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int32)
+ fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]int32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]int64)
+ v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]int64)
+ fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]float32)
+ v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]float32)
+ fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]float32, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]float64)
+ v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]float64)
+ fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]float64, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint32]bool)
+ v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint32]bool)
+ fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]bool, xlen)
+ changed = true
+ }
+
+ var mk uint32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
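+// Each *V decoder handles both container encodings: a non-negative length from
+// ReadMapStart drives a counted loop, while a negative length indicates a
+// stream-style map that is read until CheckBreak signals the end marker.
+// Key/value/end boundaries are reported to the containerStateRecv (cr) only
+// when the handle supplies one.
+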
+func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]interface{})
+ v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]interface{})
+ fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]string)
+ v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]string)
+ fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]string, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint)
+ v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint)
+ fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint8)
+ v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint8)
+ fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]uint8, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint16)
+ v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint16)
+ fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]uint16, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint32)
+ v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint32)
+ fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]uint32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uint64)
+ v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uint64)
+ fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]uintptr)
+ v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]uintptr)
+ fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int)
+ v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int)
+ fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int8)
+ v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int8)
+ fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]int8, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int16)
+ v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int16)
+ fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]int16, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int32)
+ v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int32)
+ fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]int32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]int64)
+ v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]int64)
+ fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]float32)
+ v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]float32)
+ fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]float32, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]float64)
+ v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]float64)
+ fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]float64, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uint64]bool)
+ v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uint64]bool)
+ fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uint64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]bool, xlen)
+ changed = true
+ }
+
+ var mk uint64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
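+// For maps with interface{} values, the existing entry is reused as the decode
+// target (mapGet) unless MapValueReset or InterfaceReset is set on the handle,
+// in which case each value is decoded into a fresh nil interface instead.
+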
+func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]interface{})
+ v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]interface{})
+ fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uintptr
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]string)
+ v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]string)
+ fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]string, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint)
+ v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint)
+ fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint8)
+ v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint8)
+ fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]uint8, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint16)
+ v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint16)
+ fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]uint16, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint32)
+ v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint32)
+ fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]uint32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uint64)
+ v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uint64)
+ fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]uintptr)
+ v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]uintptr)
+ fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uintptr, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int)
+ v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int)
+ fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int8)
+ v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int8)
+ fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]int8, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int16)
+ v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int16)
+ fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]int16, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int32)
+ v, changed := fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int32)
+ fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]int32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]int64)
+ v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]int64)
+ fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]float32)
+ v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]float32)
+ fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]float32, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]float64)
+ v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]float64)
+ fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]float64, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[uintptr]bool)
+ v, changed := fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[uintptr]bool)
+ fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[uintptr]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]bool, xlen)
+ changed = true
+ }
+
+ var mk uintptr
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
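+// Note: for interface{}-valued maps the existing entry can be reused as the decode target:
+// mapGet is set unless the handle's MapValueReset or InterfaceReset option is enabled, in
+// which case each value is decoded into a fresh nil interface rather than into v[mk].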
+func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]interface{})
+ v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]interface{})
+ fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]string)
+ v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]string)
+ fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]string, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint)
+ v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint)
+ fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint8)
+ v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint8)
+ fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]uint8, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint16)
+ v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint16)
+ fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]uint16, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint32)
+ v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint32)
+ fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]uint32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uint64)
+ v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uint64)
+ fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]uintptr)
+ v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]uintptr)
+ fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int)
+ v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int)
+ fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int8)
+ v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int8)
+ fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]int8, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int16)
+ v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int16)
+ fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]int16, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int32)
+ v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int32)
+ fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]int32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]int64)
+ v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]int64)
+ fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]float32)
+ v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]float32)
+ fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]float32, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]float64)
+ v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]float64)
+ fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]float64, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int]bool)
+ v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int]bool)
+ fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]bool, xlen)
+ changed = true
+ }
+
+ var mk int
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]interface{})
+ v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]interface{})
+ fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int8
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]string)
+ v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]string)
+ fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]string, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint)
+ v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint)
+ fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint8)
+ v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint8)
+ fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]uint8, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint16)
+ v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint16)
+ fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]uint16, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint32)
+ v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint32)
+ fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]uint32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uint64)
+ v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uint64)
+ fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]uintptr)
+ v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]uintptr)
+ fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int)
+ v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int)
+ fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int8)
+ v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int8)
+ fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]int8, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int16)
+ v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int16)
+ fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]int16, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int32)
+ v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int32)
+ fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]int32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]int64)
+ v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]int64)
+ fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]float32)
+ v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]float32)
+ fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]float32, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]float64)
+ v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]float64)
+ fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]float64, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int8]bool)
+ v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int8]bool)
+ fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int8]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]bool, xlen)
+ changed = true
+ }
+
+ var mk int8
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]interface{})
+ v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]interface{})
+ fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int16
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]string)
+ v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]string)
+ fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]string, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint)
+ v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint)
+ fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint8)
+ v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint8)
+ fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]uint8, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint16)
+ v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint16)
+ fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]uint16, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint32)
+ v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint32)
+ fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]uint32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uint64)
+ v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uint64)
+ fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]uintptr)
+ v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]uintptr)
+ fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int)
+ v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int)
+ fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int8)
+ v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int8)
+ fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]int8, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int16)
+ v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int16)
+ fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]int16, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int32)
+ v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int32)
+ fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]int32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]int64)
+ v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]int64)
+ fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]float32)
+ v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]float32)
+ fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]float32, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]float64)
+ v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]float64)
+ fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]float64, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int16]bool)
+ v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int16]bool)
+ fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int16]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]bool, xlen)
+ changed = true
+ }
+
+ var mk int16
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]interface{})
+ v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]interface{})
+ fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int32
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]string)
+ v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]string)
+ fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]string, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint)
+ v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint)
+ fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint8)
+ v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint8)
+ fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]uint8, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint16)
+ v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint16)
+ fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]uint16, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint32)
+ v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint32)
+ fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]uint32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uint64)
+ v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uint64)
+ fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]uintptr)
+ v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]uintptr)
+ fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int)
+ v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int)
+ fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int8)
+ v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int8)
+ fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]int8, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int16)
+ v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int16)
+ fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]int16, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int32)
+ v, changed := fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int32)
+ fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]int32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]int64)
+ v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]int64)
+ fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]float32)
+ v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]float32)
+ fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]float32, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]float64)
+ v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]float64)
+ fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]float64, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int32]bool)
+ v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int32]bool)
+ fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int32]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]bool, xlen)
+ changed = true
+ }
+
+ var mk int32
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
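+// For interface{}-valued maps, the decoder first looks up any existing value
+// for the key (mapGet) unless MapValueReset/InterfaceReset is set on the
+// handle, so the stream can be decoded into the previously stored value
+// rather than always allocating a fresh one.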
+func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]interface{})
+ v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]interface{})
+ fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int64
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]string)
+ v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]string)
+ fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]string, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint)
+ v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint)
+ fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint8)
+ v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint8)
+ fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]uint8, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint16)
+ v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint16)
+ fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]uint16, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint32)
+ v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint32)
+ fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]uint32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uint64)
+ v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uint64)
+ fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]uintptr)
+ v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]uintptr)
+ fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uintptr, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int)
+ v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int)
+ fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int8)
+ v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int8)
+ fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]int8, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int16)
+ v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int16)
+ fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]int16, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int32)
+ v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int32)
+ fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]int32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]int64)
+ v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]int64)
+ fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]float32)
+ v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]float32)
+ fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]float32, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]float64)
+ v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]float64)
+ fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]float64, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[int64]bool)
+ v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[int64]bool)
+ fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[int64]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]bool, xlen)
+ changed = true
+ }
+
+ var mk int64
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]interface{})
+ v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]interface{})
+ fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]interface{}, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]interface{}, xlen)
+ changed = true
+ }
+ mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk bool
+ var mv interface{}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]string)
+ v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]string)
+ fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]string, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]string, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv string
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint)
+ v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint)
+ fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint8)
+ v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint8)
+ fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]uint8, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint8(dd.DecodeUint(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint16)
+ v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint16)
+ fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]uint16, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint16(dd.DecodeUint(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint32)
+ v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint32)
+ fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]uint32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uint32(dd.DecodeUint(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uint64)
+ v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uint64)
+ fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uint64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uint64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeUint(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]uintptr)
+ v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]uintptr)
+ fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]uintptr, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uintptr, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv uintptr
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = uintptr(dd.DecodeUint(uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int)
+ v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int)
+ fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int(dd.DecodeInt(intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int8)
+ v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int8)
+ fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int8, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]int8, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int8
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int8(dd.DecodeInt(8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int16)
+ v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int16)
+ fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int16, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]int16, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int16
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int16(dd.DecodeInt(16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int32)
+ v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int32)
+ fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]int32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = int32(dd.DecodeInt(32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]int64)
+ v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]int64)
+ fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]int64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv int64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeInt(64)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]float32)
+ v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]float32)
+ fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]float32, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]float32, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv float32
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = float32(dd.DecodeFloat(true))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]float64)
+ v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]float64)
+ fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]float64, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]float64, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv float64
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeFloat(false)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
+
+func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[bool]bool)
+ v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[bool]bool)
+ fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) {
+ v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool,
+ d *Decoder) (_ map[bool]bool, changed bool) {
+ dd := d.d
+ cr := d.cr
+
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]bool, xlen)
+ changed = true
+ }
+
+ var mk bool
+ var mv bool
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
+ mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ return v, changed
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
new file mode 100644
index 0000000..04c173f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
@@ -0,0 +1,540 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slices of all builtin types,
+// - maps of all builtin types to string or interface values,
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8).
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+//   // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
+//
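+// As a hedged illustration (the Encoder value and map literal below are
+// assumptions, not part of this file), encoding a commonly used type is meant
+// to hit the generated type switch before any reflection is attempted:
+//
+//   var e *Encoder // assume an initialized Encoder
+//   m := map[string]string{"a": "b"}
+//   if !fastpathEncodeTypeSwitch(m, e) {
+//       // only then would the slower reflection-based path be used
+//   }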
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathCheckNilFalse = false // for reflect
+const fastpathCheckNilTrue = true // for type switch
+
+type fastpathT struct {}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+
+type fastpathA [{{ .FastpathLen }}]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < {{ .FastpathLen }} && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
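+
+// Hedged usage sketch (the call-site shape and the fi/rv names are assumptions,
+// shown only to illustrate intent): callers resolve a fast-path entry by rtid:
+//
+//   if idx := fastpathAV.index(rtid); idx != -1 {
+//       fastpathAV[idx].decfn(fi, rv) // or encfn, for the encode direction
+//   }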
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to a possible initialization loop error, we populate fastpath in an init()
+func init() {
+ if !fastpathEnabled {
+ return
+ }
+ i := 0
+ fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := reflect.ValueOf(xrt).Pointer()
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+
+ {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
+
+ {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:{{else}}
+ case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }}
+ case *[]{{ .Elem }}:{{else}}
+ case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
+{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
+ case *[]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+
+func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeArrayStart(len(v))
+ for _, v2 := range v {
+ if cr != nil { cr.sendContainerState(containerArrayElem) }
+ {{ encmd .Elem "v2"}}
+ }
+ if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}}
+}
+
+func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.EncodeMapStart(len(v) / 2)
+ for j, v2 := range v {
+ if cr != nil {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
+ {{ encmd .Elem "v2"}}
+ }
+ if cr != nil { cr.sendContainerState(containerMapEnd) }
+}
+
+{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+
+func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e)
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
+ ee := e.e
+ cr := e.cr
+ if checkNil && v == nil {
+ ee.EncodeNil()
+ return
+ }
+ ee.EncodeMapStart(len(v))
+ {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ {{end}}if e.h.Canonical {
+ {{if eq .MapKey "interface{}"}}{{/* out of band
+ */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ for j := range v2 {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ e.asis(v2[j].v)
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ e.encode(v[v2[j].i])
+ } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = {{ $x }}(k)
+ i++
+ }
+ sort.Sort({{ sorttype .MapKey false}}(v2))
+ for _, k2 := range v2 {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{if eq .MapKey "string"}}if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+ } {{end}}
+ } else {
+ for k2, v2 := range v {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{if eq .MapKey "string"}}if asSymbols {
+ ee.EncodeSymbol(k2)
+ } else {
+ ee.EncodeString(c_UTF8, k2)
+ }{{else}}{{ encmd .MapKey "k2"}}{{end}}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ encmd .Elem "v2"}}
+ }
+ }
+ if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}}
+}
+
+{{end}}{{end}}{{end}}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:{{else}}
+ case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }}
+ case *[]{{ .Elem }}:{{else}}
+ case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
+ v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d)
+ if changed2 {
+ *v = v2
+ }
+{{end}}{{end}}
+ default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+{{/*
+Slices can change if they
+- did not come from an array
+- are addressable (from a ptr)
+- are settable (e.g. contained in an interface{})
+*/}}
+func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
+ array := f.seq == seqTypeArray
+ if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}}
+ vp := rv.Addr().Interface().(*[]{{ .Elem }})
+ v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().([]{{ .Elem }})
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) {
+ v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
+ dd := d.d
+ {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []{{ .Elem }}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+
+ if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
+ if containerLenS > cap(v) {
+ if canChange { {{/*
+ // fast-path is for "basic" immutable types, so no need to copy them over
+ // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen))
+ // copy(s, v[:cap(v)])
+ // v = s */}}
+ var xlen int
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]{{ .Elem }}, xlen)
+ }
+ } else {
+ v = make([]{{ .Elem }}, xlen)
+ }
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), containerLenS)
+ }
+ x2read = len(v)
+ } else if containerLenS != len(v) {
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ } {{/* // all checks done. cannot go past len. */}}
+ j := 0
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ }
+ if xtrunc { {{/* // means canChange=true, changed=true already. */}}
+ for ; j < containerLenS; j++ {
+ v = append(v, {{ zerocmd .Elem }})
+ slh.ElemContainerState(j)
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ }
+ } else if !canChange {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
+ d.swallow()
+ }
+ }
+ } else {
+ breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}}
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []{{ .Elem }}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ if cap(v) == 0 {
+ v = make([]{{ .Elem }}, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
+ if j >= len(v) {
+ if canChange {
+ v = append(v, {{ zerocmd .Elem }})
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ }
+ }
+ slh.ElemContainerState(j)
+ if j < len(v) { {{/* // all checks done. cannot go past len. */}}
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j])
+ {{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ } else {
+ d.swallow()
+ }
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+{{end}}{{end}}{{end}}
+
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+{{/*
+Maps can change if they are
+- addressable (from a ptr)
+- settable (e.g. contained in an interface{})
+*/}}
+func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
+ if rv.CanAddr() {
+ vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }})
+ v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }})
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
+ }
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) {
+ v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
+ d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
+ dd := d.d
+ cr := d.cr
+ {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
+ if checkNil && dd.TryDecodeAsNil() {
+ if v != nil {
+ changed = true
+ }
+ return nil, changed
+ }
+
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
+ v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
+ changed = true
+ }
+ {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
+ var mk {{ .MapKey }}
+ var mv {{ .Elem }}
+ if containerLen > 0 {
+ for j := 0; j < containerLen; j++ {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{ if eq .MapKey "interface{}" }}mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+ }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+ d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ } else if containerLen < 0 {
+ for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
+ {{ if eq .MapKey "interface{}" }}mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+ }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
+ {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+ d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ }
+ if cr != nil { cr.sendContainerState(containerMapEnd) }
+ return v, changed
+}
+
+{{end}}{{end}}{{end}}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 0000000..d6f5f0c
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,32 @@
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, the execution of small tools which use codec, etc.
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
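+//
+// For example (illustrative only), the tag can be passed explicitly for a
+// quicker build or test cycle:
+//
+//   go build -tags notfastpath ./...
+//   go test -tags notfastpath ./...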
+
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func (x fastpathA) index(rtid uintptr) int { return -1 }
+
+var fastpathAV fastpathA
+var fastpathTV fastpathT
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
new file mode 100644
index 0000000..32df541
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
@@ -0,0 +1,104 @@
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else if {{var "l"}} > 0 {
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
+ for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ }
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ _, _ = {{var "rl"}}, {{var "rt"}}
+ {{var "rr"}} = {{var "l"}} // len({{var "v"}})
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
+ {{var "j"}} := 0
+ for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ z.DecSwallow()
+ }
+ {{ else }}if {{var "rt"}} {
+ for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ {{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ if {{var "j"}} < len({{var "v"}}) {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ } else {
+ z.DecSwallow()
+ }
+ {{end}}
+ }
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
new file mode 100644
index 0000000..77400e0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
@@ -0,0 +1,58 @@
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} > 0 {
+for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} else if {{var "l"}} < 0 {
+for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
new file mode 100644
index 0000000..22bce77
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -0,0 +1,233 @@
+// //+build ignore
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
+// ************************************************************
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// This file is used to generate helper code for codecgen.
+// The values here, i.e. genHelper(En|De)coder, are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
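+//
+// Hedged sketch of the intended call shape (e is an assumed, already-created
+// *Encoder inside a codecgen-generated method; the local names are illustrative):
+//
+//   h, ee := GenHelperEncoder(e)
+//   _, _ = h, ee // generated code then calls helper methods on h and driver methods on ee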
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+ return genHelperEncoder{e: e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+ return genHelperDecoder{d: d}, d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ f.e.encodeI(iv, false, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.e.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v)
+ if rt.Kind() == reflect.Ptr {
+ rt = rt.Elem()
+ }
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.e.h.getExt(rtid); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() {
+ f.d.swallow()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ f.d.decodeI(iv, chkPtr, false, false, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.d.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v).Elem()
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.d.h.getExt(rtid); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
new file mode 100644
index 0000000..3195857
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -0,0 +1,364 @@
+// //+build ignore
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
+// ************************************************************
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// This file is used to generate helper code for codecgen.
+// The values here, i.e. genHelper(En|De)coder, are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
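+//
+// Hedged decode-side sketch (d is an assumed *Decoder supplied by a
+// codecgen-generated method; the local names are illustrative only):
+//
+//   h, dd := GenHelperDecoder(d)
+//   _, _ = h, dd // generated code then calls helper methods on h and driver methods on dd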
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+ return genHelperEncoder{e:e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+ return genHelperDecoder{d:d}, d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ f.e.encodeI(iv, false, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, c_RAW)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.e.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v)
+ if rt.Kind() == reflect.Ptr {
+ rt = rt.Elem()
+ }
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.e.h.getExt(rtid); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() {
+ f.d.swallow()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ f.d.decodeI(iv, chkPtr, false, false, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeBytes(f.d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+ if _, ok := f.d.hh.(*BincHandle); ok {
+ return timeTypId
+ }
+ return 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ rt := reflect.TypeOf(v).Elem()
+ rtid := reflect.ValueOf(rt).Pointer()
+ if xfFn := f.d.h.getExt(rtid); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
+ return decInferLen(clen, maxlen, unit)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
+
+{{/*
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncDriver() encDriver {
+ return f.e.e
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecDriver() decDriver {
+ return f.d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncNil() {
+ f.e.e.EncodeNil()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBytes(v []byte) {
+ f.e.e.EncodeStringBytes(c_RAW, v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncArrayStart(length int) {
+ f.e.e.EncodeArrayStart(length)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncArrayEnd() {
+ f.e.e.EncodeArrayEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncArrayEntrySeparator() {
+ f.e.e.EncodeArrayEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapStart(length int) {
+ f.e.e.EncodeMapStart(length)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapEnd() {
+ f.e.e.EncodeMapEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapEntrySeparator() {
+ f.e.e.EncodeMapEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncMapKVSeparator() {
+ f.e.e.EncodeMapKVSeparator()
+}
+
+// ---------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBytes(v *[]byte) {
+ *v = f.d.d.DecodeBytes(*v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTryNil() bool {
+ return f.d.d.TryDecodeAsNil()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecContainerIsNil() (b bool) {
+ return f.d.d.IsContainerType(valueTypeNil)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecContainerIsMap() (b bool) {
+ return f.d.d.IsContainerType(valueTypeMap)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecContainerIsArray() (b bool) {
+ return f.d.d.IsContainerType(valueTypeArray)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecCheckBreak() bool {
+ return f.d.d.CheckBreak()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapStart() int {
+ return f.d.d.ReadMapStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayStart() int {
+ return f.d.d.ReadArrayStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapEnd() {
+ f.d.d.ReadMapEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayEnd() {
+ f.d.d.ReadArrayEnd()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayEntrySeparator() {
+ f.d.d.ReadArrayEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapEntrySeparator() {
+ f.d.d.ReadMapEntrySeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecMapKVSeparator() {
+ f.d.d.ReadMapKVSeparator()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte {
+ return f.d.d.DecodeStringAsBytes(bs)
+}
+
+
+// -- encode calls (primitives)
+{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) {
+ ee := f.e.e
+ {{ encmd .Primitive "v" }}
+}
+{{ end }}{{ end }}{{ end }}
+
+// -- decode calls (primitives)
+{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) {
+ dd := f.d.d
+ *vp = {{ decmd .Primitive }}
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) {
+ dd := f.d.d
+ v = {{ decmd .Primitive }}
+ return
+}
+{{ end }}{{ end }}{{ end }}
+
+
+// -- encode calls (slices/maps)
+{{range .Values}}{{if not .Primitive }}{{if .Slice }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
+ f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e)
+}
+{{ end }}{{ end }}
+
+// -- decode calls (slices/maps)
+{{range .Values}}{{if not .Primitive }}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) {
+{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
+ v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d)
+ if changed {
+ *vp = v
+ }
+}
+{{ end }}{{ end }}
+*/}}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go
new file mode 100644
index 0000000..2ace97b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by an MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = `
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} > 0 {
+for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} else if {{var "l"}} < 0 {
+for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
+`
+
+const genDecListTmpl = `
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else if {{var "l"}} > 0 {
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
+ for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ }
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ _, _ = {{var "rl"}}, {{var "rt"}}
+ {{var "rr"}} = {{var "l"}} // len({{var "v"}})
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
+ {{var "j"}} := 0
+ for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ z.DecSwallow()
+ }
+ {{ else }}if {{var "rt"}} {
+ for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ var {{var "t"}} {{ .Typ }}
+ {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
+ {{var "v"}} <- {{var "t"}}
+ {{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ if {{var "j"}} < len({{var "v"}}) {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ } else {
+ z.DecSwallow()
+ }
+ {{end}}
+ }
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
+`
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 0000000..ffc5aec
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,1997 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Builtins
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+// - Canonical option. (codecgen IGNORES it currently)
+// This is just because it has not been implemented.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement it completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// codec framework is very feature rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to the runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// The generated file will panic at init if it was generated with an older version of the library than the one in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code.
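+//
+// Illustrative sketch only (not upstream documentation): for a hypothetical type
+//    type Foo struct { A int; B []string }
+// codecgen emits Selfer methods with signatures along the lines of
+//    func (x *Foo) CodecEncodeSelf(e *codec1978.Encoder)
+//    func (x *Foo) CodecDecodeSelf(d *codec1978.Decoder)
+// (codec1978 being the import alias used for this package in generated files),
+// so the Encoder/Decoder can dispatch to them without reflection.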
+
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+// - helper methods change (signature change, new ones added, some removed, etc)
+// - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+const GenVersion = 5
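+
+// For orientation, a condensed sketch (assumption; see the init() emission in Gen below)
+// of the guard written into generated files, so stale output fails fast at startup:
+//    if codec1978.GenVersion != 5 {
+//        _, file, _, _ := runtime.Caller(0)
+//        panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+//            5, codec1978.GenVersion, file))
+//    }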
+
+const (
+ genCodecPkg = "codec1978"
+ genTempVarPfx = "yy"
+ genTopLevelVarName = "x"
+
+ // ignore canBeNil parameter, and always set to true.
+ // This is because nil can appear anywhere, so we should always check.
+ genAnythingCanBeNil = true
+
+ // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelfFromMap function;
+ // else make codecDecodeSelfFromMap{LenPrefix,CheckBreak} so that conditionals
+ // are not executed a lot.
+ //
+ // From testing, it didn't make much difference in runtime, so keep as true (one function only)
+ genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+ genStructMapStyleConsolidated genStructMapStyle = iota
+ genStructMapStyleLenPrefix
+ genStructMapStyleCheckBreak
+)
+
+var (
+ genAllTypesSamePkgErr = errors.New("All types must be in the same package")
+ genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
+ genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+ genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+// genRunner holds some state used during a Gen run.
+type genRunner struct {
+ w io.Writer // output
+ c uint64 // counter used for generating varsfx
+ t []reflect.Type // list of types to run selfer on
+
+ tc reflect.Type // currently running selfer on this type
+ te map[uintptr]bool // types for which the encoder has been created
+ td map[uintptr]bool // types for which the decoder has been created
+ cp string // codec import path
+
+ im map[string]reflect.Type // imports to add
+ imn map[string]string // package names of imports to add
+ imc uint64 // counter for import numbers
+
+ is map[reflect.Type]struct{} // types seen during import search
+ bp string // base PkgPath for which we are generating
+
+ cpfx string // codec package prefix
+ unsafe bool // is unsafe to be used in generated code?
+
+ tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+ ts []reflect.Type // types for which enc/dec must be generated
+
+ xs string // top level variable/constant suffix
+ hn string // fn helper type name
+
+ ti *TypeInfos
+ // rr *rand.Rand // random generator for file-specific types
+}
+
+// Gen will write a complete go file containing Selfer implementations for each
+// type passed. All the types must be in the same package.
+//
+// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.*
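+//
+// Sketch of a call, roughly as the codecgen bootstrap program drives it (an assumption
+// for illustration only; the names below are placeholders, not a stable API):
+//    var buf bytes.Buffer
+//    codec.Gen(&buf, "", "mypkg", "1978", false, nil, reflect.TypeOf(Foo{}), reflect.TypeOf(Bar{}))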
+func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
+ if len(typ) == 0 {
+ return
+ }
+ x := genRunner{
+ unsafe: useUnsafe,
+ w: w,
+ t: typ,
+ te: make(map[uintptr]bool),
+ td: make(map[uintptr]bool),
+ im: make(map[string]reflect.Type),
+ imn: make(map[string]string),
+ is: make(map[reflect.Type]struct{}),
+ tm: make(map[reflect.Type]struct{}),
+ ts: []reflect.Type{},
+ bp: genImportPath(typ[0]),
+ xs: uid,
+ ti: ti,
+ }
+ if x.ti == nil {
+ x.ti = defTypeInfos
+ }
+ if x.xs == "" {
+ rr := rand.New(rand.NewSource(time.Now().UnixNano()))
+ x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
+ }
+
+ // gather imports first:
+ x.cp = genImportPath(reflect.TypeOf(x))
+ x.imn[x.cp] = genCodecPkg
+ for _, t := range typ {
+ // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ if genImportPath(t) != x.bp {
+ panic(genAllTypesSamePkgErr)
+ }
+ x.genRefPkgs(t)
+ }
+ if buildTags != "" {
+ x.line("//+build " + buildTags)
+ x.line("")
+ }
+ x.line(`
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+`)
+ x.line("package " + pkgName)
+ x.line("")
+ x.line("import (")
+ if x.cp != x.bp {
+ x.cpfx = genCodecPkg + "."
+ x.linef("%s \"%s\"", genCodecPkg, x.cp)
+ }
+ // use a sorted set of im keys, so that we can get consistent output
+ imKeys := make([]string, 0, len(x.im))
+ for k, _ := range x.im {
+ imKeys = append(imKeys, k)
+ }
+ sort.Strings(imKeys)
+ for _, k := range imKeys { // for k, _ := range x.im {
+ x.linef("%s \"%s\"", x.imn[k], k)
+ }
+ // add required packages
+ for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} {
+ if _, ok := x.im[k]; !ok {
+ if k == "unsafe" && !x.unsafe {
+ continue
+ }
+ x.line("\"" + k + "\"")
+ }
+ }
+ x.line(")")
+ x.line("")
+
+ x.line("const (")
+ x.linef("// ----- content types ----")
+ x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8))
+ x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW))
+ x.linef("// ----- value types used ----")
+ x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray))
+ x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap))
+ x.linef("// ----- containerStateValues ----")
+ x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey))
+ x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue))
+ x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd))
+ x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem))
+ x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd))
+ x.line(")")
+ x.line("var (")
+ x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())")
+ x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)")
+ x.line(")")
+ x.line("")
+
+ if x.unsafe {
+ x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}")
+ x.line("")
+ }
+ x.hn = "codecSelfer" + x.xs
+ x.line("type " + x.hn + " struct{}")
+ x.line("")
+
+ x.varsfxreset()
+ x.line("func init() {")
+ x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion)
+ x.line("_, file, _, _ := runtime.Caller(0)")
+ x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
+ x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx)
+ x.line("panic(err)")
+ x.linef("}")
+ x.line("if false { // reference the types, but skip this branch at build/run time")
+ var n int
+ // for k, t := range x.im {
+ for _, k := range imKeys {
+ t := x.im[k]
+ x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
+ n++
+ }
+ if x.unsafe {
+ x.linef("var v%v unsafe.Pointer", n)
+ n++
+ }
+ if n > 0 {
+ x.out("_")
+ for i := 1; i < n; i++ {
+ x.out(", _")
+ }
+ x.out(" = v0")
+ for i := 1; i < n; i++ {
+ x.outf(", v%v", i)
+ }
+ }
+ x.line("} ") // close if false
+ x.line("}") // close init
+ x.line("")
+
+ // generate rest of type info
+ for _, t := range typ {
+ x.tc = t
+ x.selfer(true)
+ x.selfer(false)
+ }
+
+ for _, t := range x.ts {
+ rtid := reflect.ValueOf(t).Pointer()
+ // generate enc functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(true)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.encListFallback("v", t)
+ case reflect.Map:
+ x.encMapFallback("v", t)
+ default:
+ panic(genExpectArrayOrMapErr)
+ }
+ x.line("}")
+ x.line("")
+
+ // generate dec functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(false)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.decListFallback("v", rtid, t)
+ case reflect.Map:
+ x.decMapFallback("v", rtid, t)
+ default:
+ panic(genExpectArrayOrMapErr)
+ }
+ x.line("}")
+ x.line("")
+ }
+
+ x.line("")
+}
+
+func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
+ // return varname != genTopLevelVarName && t != x.tc
+ // the only time we checkForSelfer is if we are not at the TOP of the generated code.
+ return varname != genTopLevelVarName
+}
+
+func (x *genRunner) arr2str(t reflect.Type, s string) string {
+ if t.Kind() == reflect.Array {
+ return s
+ }
+ return ""
+}
+
+func (x *genRunner) genRequiredMethodVars(encode bool) {
+ x.line("var h " + x.hn)
+ if encode {
+ x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)")
+ } else {
+ x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)")
+ }
+ x.line("_, _, _ = h, z, r")
+}
+
+func (x *genRunner) genRefPkgs(t reflect.Type) {
+ if _, ok := x.is[t]; ok {
+ return
+ }
+ // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ x.is[t] = struct{}{}
+ tpkg, tname := genImportPath(t), t.Name()
+ if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
+ if _, ok := x.im[tpkg]; !ok {
+ x.im[tpkg] = t
+ if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
+ x.imn[tpkg] = tpkg
+ } else {
+ x.imc++
+ x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
+ x.genRefPkgs(t.Elem())
+ case reflect.Map:
+ x.genRefPkgs(t.Elem())
+ x.genRefPkgs(t.Key())
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
+ x.genRefPkgs(t.Field(i).Type)
+ }
+ }
+ }
+}
+
+func (x *genRunner) line(s string) {
+ x.out(s)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
+func (x *genRunner) varsfx() string {
+ x.c++
+ return strconv.FormatUint(x.c, 10)
+}
+
+func (x *genRunner) varsfxreset() {
+ x.c = 0
+}
+
+func (x *genRunner) out(s string) {
+ if _, err := io.WriteString(x.w, s); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) linef(s string, params ...interface{}) {
+ x.line(fmt.Sprintf(s, params...))
+}
+
+func (x *genRunner) outf(s string, params ...interface{}) {
+ x.out(fmt.Sprintf(s, params...))
+}
+
+func (x *genRunner) genTypeName(t reflect.Type) (n string) {
+ // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
+
+ // if the type has a PkgPath, which doesn't match the current package,
+ // then include it.
+ // We cannot depend on t.String() because it includes the current package,
+ // or on t.PkgPath because it includes the full import path.
+ //
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "*"
+ t = t.Elem()
+ }
+ if tn := t.Name(); tn != "" {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
+ case reflect.Slice:
+ return ptrPfx + "[]" + x.genTypeName(t.Elem())
+ case reflect.Array:
+ return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
+ case reflect.Chan:
+ return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
+ default:
+ if t == intfTyp {
+ return ptrPfx + "interface{}"
+ } else {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ }
+}
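+
+// Illustrative examples (assumption) of genTypeName output, using the pkgN_ import
+// aliases assigned in genRefPkgs:
+//    Foo                for a named type in the package being generated
+//    *[]pkg1_v1.Pod     for a pointer to a slice of an exported foreign type
+//    map[string]uint64  for an unnamed map of predeclared types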
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+ if t.Name() == "" {
+ return t.String()
+ } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+ return t.Name()
+ } else {
+ return x.imn[genImportPath(t)] + "." + t.Name()
+ // return t.String() // best way to get the package name inclusive
+ }
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+ // if t is a named type, w
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+ reflect.Slice, reflect.Map, reflect.Invalid:
+ return "nil"
+ case reflect.Bool:
+ return "false"
+ case reflect.String:
+ return `""`
+ case reflect.Struct, reflect.Array:
+ return x.genTypeName(t) + "{}"
+ default: // all numbers
+ return "0"
+ }
+}
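+
+// For example, genZeroValueR yields "nil" for pointer, slice, map or interface kinds,
+// `""` for string, "false" for bool, "Foo{}" for a struct or array type Foo, and "0"
+// for the numeric kinds.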
+
+func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
+ return genMethodNameT(t, x.tc)
+}
+
+func (x *genRunner) selfer(encode bool) {
+ t := x.tc
+ t0 := t
+ // always make decode use a pointer receiver,
+ // and structs always use a ptr receiver (encode|decode)
+ isptr := !encode || t.Kind() == reflect.Struct
+ x.varsfxreset()
+ fnSigPfx := "func (x "
+ if isptr {
+ fnSigPfx += "*"
+ }
+ fnSigPfx += x.genTypeName(t)
+
+ x.out(fnSigPfx)
+ if isptr {
+ t = reflect.PtrTo(t)
+ }
+ if encode {
+ x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
+ x.genRequiredMethodVars(true)
+ // x.enc(genTopLevelVarName, t)
+ x.encVar(genTopLevelVarName, t)
+ } else {
+ x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ // do not use decVar, as there is no need to check TryDecodeAsNil
+ // or way to elegantly handle that, and also setting it to a
+ // non-nil value doesn't affect the pointer passed.
+ // x.decVar(genTopLevelVarName, t, false)
+ x.dec(genTopLevelVarName, t0)
+ }
+ x.line("}")
+ x.line("")
+
+ if encode || t0.Kind() != reflect.Struct {
+ return
+ }
+
+ // write is containerMap
+ if genUseOneFunctionForDecStructMap {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
+ x.line("}")
+ x.line("")
+ } else {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
+ x.line("}")
+ x.line("")
+
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
+ x.line("}")
+ x.line("")
+ }
+
+ // write containerArray
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0)
+ x.line("}")
+ x.line("")
+
+}
+
+// used for chan, array, slice, map
+func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
+ if encode {
+ x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname)
+ } else {
+ x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
+ }
+ x.registerXtraT(t)
+}
+
+func (x *genRunner) registerXtraT(t reflect.Type) {
+ // recursively register the types
+ if _, ok := x.tm[t]; ok {
+ return
+ }
+ var tkey reflect.Type
+ switch t.Kind() {
+ case reflect.Chan, reflect.Slice, reflect.Array:
+ case reflect.Map:
+ tkey = t.Key()
+ default:
+ return
+ }
+ x.tm[t] = struct{}{}
+ x.ts = append(x.ts, t)
+ // check if this refers to any xtra types eg. a slice of array: add the array
+ x.registerXtraT(t.Elem())
+ if tkey != nil {
+ x.registerXtraT(tkey)
+ }
+}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+ // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+ var checkNil bool
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+ checkNil = true
+ }
+ if checkNil {
+ x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+ }
+ switch t.Kind() {
+ case reflect.Ptr:
+ switch t.Elem().Kind() {
+ case reflect.Struct, reflect.Array:
+ x.enc(varname, genNonPtr(t))
+ default:
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := *" + varname)
+ x.enc(genTempVarPfx+i, genNonPtr(t))
+ }
+ case reflect.Struct, reflect.Array:
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := &" + varname)
+ x.enc(genTempVarPfx+i, t)
+ default:
+ x.enc(varname, t)
+ }
+
+ if checkNil {
+ x.line("}")
+ }
+
+}
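+
+// For example, for a nillable field x.P of type *T, encVar wraps the emitted encode as
+//    if x.P == nil { r.EncodeNil() } else { ... }
+// (x.P and T are placeholder names for illustration).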
+
+// enc will encode a variable (varname) of type t,
+// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying)
+func (x *genRunner) enc(varname string, t reflect.Type) {
+ rtid := reflect.ValueOf(t).Pointer()
+ // We call CodecEncodeSelf if one of the following is honored:
+ // - the type already implements Selfer, call that
+ // - the type has a Selfer implementation just created, use that
+ // - the type is in the list of the ones we will generate for, but it is not currently being generated
+
+ mi := x.varsfx()
+ tptr := reflect.PtrTo(t)
+ tk := t.Kind()
+ if x.checkForSelfer(t, varname) {
+ if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+ if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ } else { // varname is of type T
+ if t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ } else if tptr.Implements(selferTyp) {
+ x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
+ x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
+ return
+ }
+ }
+
+ if _, ok := x.te[rtid]; ok {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.te[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+ x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
+ x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == rawExtTyp {
+ x.linef("} else { r.EncodeRawExt(%v, e)", varname)
+ return
+ }
+ // HACK: Support for Builtins.
+ // Currently, only Binc supports builtins, and the only builtin type is time.Time.
+ // Have a method that returns the rtid for time.Time if Handle is Binc.
+ if t == timeTyp {
+ vrtid := genTempVarPfx + "m" + x.varsfx()
+ x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+ x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+ if genImportPath(t) != "" && t.Name() != "" {
+ // first check if extensions are configured, before doing the interface conversion
+ x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
+ }
+ if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+ if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ } else { // varname is of type T
+ if t.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname)
+ } else if t.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+ } else if tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname)
+ }
+ }
+ x.line("} else {")
+
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(int64(" + varname + "))")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(uint64(" + varname + "))")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(float32(" + varname + "))")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(float64(" + varname + "))")
+ case reflect.Bool:
+ x.line("r.EncodeBool(bool(" + varname + "))")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))")
+ case reflect.Chan:
+ x.xtraSM(varname, true, t)
+ // x.encListFallback(varname, rtid, t)
+ case reflect.Array:
+ x.xtraSM(varname, true, t)
+ case reflect.Slice:
+ // if nil, call dedicated function
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+ } else {
+ x.xtraSM(varname, true, t)
+ // x.encListFallback(varname, rtid, t)
+ }
+ case reflect.Map:
+ // if nil, call dedicated function
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
+ } else {
+ x.xtraSM(varname, true, t)
+ // x.encMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if !inlist {
+ delete(x.te, rtid)
+ x.line("z.EncFallback(" + varname + ")")
+ break
+ }
+ x.encStruct(varname, rtid, t)
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.EncFallback(" + varname + ")")
+ }
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(0)")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(0)")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(0)")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(0)")
+ case reflect.Bool:
+ x.line("r.EncodeBool(false)")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`)
+ default:
+ x.line("r.EncodeNil()")
+ }
+}
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+ // Use knowledge from structfieldinfo (mbs, encodable fields; ignore omitempty).
+ // Replicate the code in kStruct, i.e. for each field, deref the type to non-pointer and call x.enc on it.
+
+ // if t == the type we are currently running the selfer on, do for all
+ ti := x.ti.get(rtid, t)
+ i := x.varsfx()
+ sepVarname := genTempVarPfx + "sep" + i
+ numfieldsvar := genTempVarPfx + "q" + i
+ ti2arrayvar := genTempVarPfx + "r" + i
+ struct2arrvar := genTempVarPfx + "2arr" + i
+
+ x.line(sepVarname + " := !z.EncBinary()")
+ x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ // due to omitEmpty, we need to calculate the
+ // number of non-empty things we write out first.
+ // This is required as we need to pre-determine the size of the container,
+ // to support length-prefixing.
+ x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
+ x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar)
+ x.linef("const %s bool = %v", ti2arrayvar, ti.toArray)
+ nn := 0
+ for j, si := range tisfi {
+ if !si.omitEmpty {
+ nn++
+ continue
+ }
+ var t2 reflect.StructField
+ var omitline string
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ omitline += varname3 + " != nil && "
+ }
+ }
+ }
+ // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+ // also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+ switch t2.Type.Kind() {
+ case reflect.Struct:
+ omitline += " true"
+ case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+ omitline += "len(" + varname + "." + t2.Name + ") != 0"
+ default:
+ omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
+ }
+ x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
+ }
+ x.linef("var %snn%s int", genTempVarPfx, i)
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+ x.linef("} else {") // if not ti.toArray
+ x.linef("%snn%s = %v", genTempVarPfx, i, nn)
+ x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
+ x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
+ x.linef("%snn%s = %v", genTempVarPfx, i, 0)
+ // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
+ x.line("}") // close if not StructToArray
+
+ for j, si := range tisfi {
+ i := x.varsfx()
+ isNilVarName := genTempVarPfx + "n" + i
+ var labelUsed bool
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix)
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ if !labelUsed {
+ x.line("var " + isNilVarName + " bool")
+ }
+ x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
+ x.line("goto LABEL" + i)
+ x.line("}")
+ labelUsed = true
+ // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
+ }
+ }
+ // t2 = t.FieldByIndex(si.is)
+ }
+ if labelUsed {
+ x.line("LABEL" + i + ":")
+ }
+ // if the type of the field is a Selfer, or one of the ones
+
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ }
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ if si.omitEmpty {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ if si.omitEmpty {
+ x.linef("} else {")
+ x.encZero(t2.Type)
+ x.linef("}")
+ }
+ if labelUsed {
+ x.line("}")
+ }
+
+ x.linef("} else {") // if not ti.toArray
+
+ if si.omitEmpty {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ x.line("}")
+ } else {
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ }
+ if si.omitEmpty {
+ x.line("}")
+ }
+ x.linef("} ") // end if/else ti.toArray
+ }
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+ x.line("}")
+
+}
+
+func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+ i := x.varsfx()
+ g := genTempVarPfx
+ x.line("r.EncodeArrayStart(len(" + varname + "))")
+ if t.Kind() == reflect.Chan {
+ x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.linef("%sv%s := <-%s", g, i, varname)
+ } else {
+ // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ }
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+ // TODO: expand this to handle canonical.
+ i := x.varsfx()
+ x.line("r.EncodeMapStart(len(" + varname + "))")
+ x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.encVar(genTempVarPfx+"k"+i, t.Key())
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
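+
+// Rough sketch (assumption) of the shape encMapFallback emits for a non-fastpath map
+// field x.M (key/value encoding shown schematically; "1978" is an illustrative uid suffix):
+//    r.EncodeMapStart(len(x.M))
+//    for yyk1, yyv1 := range x.M {
+//        z.EncSendContainerState(codecSelfer_containerMapKey1978)
+//        // ... encVar output for yyk1 ...
+//        z.EncSendContainerState(codecSelfer_containerMapValue1978)
+//        // ... encVar output for yyv1 ...
+//    }
+//    z.EncSendContainerState(codecSelfer_containerMapEnd1978)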
+
+func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
+ // We only decode as nil if the value is nillable.
+ // This removes some of the wasted checks for TryDecodeAsNil.
+ // We need to think about this more, to see what happens if omitempty, etc
+ // cause a nil value to be stored when something is expected.
+ // This could happen when decoding from a struct encoded as an array.
+ // For that, decVar should be called with canBeNil=true, to force true as its value.
+ i := x.varsfx()
+ if !canBeNil {
+ canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
+ }
+ if canBeNil {
+ x.line("if r.TryDecodeAsNil() {")
+ if t.Kind() == reflect.Ptr {
+ x.line("if " + varname + " != nil { ")
+
+ // if varname is a field of a struct (has a dot in it),
+ // then just set it to nil
+ if strings.IndexByte(varname, '.') != -1 {
+ x.line(varname + " = nil")
+ } else {
+ x.line("*" + varname + " = " + x.genZeroValueR(t.Elem()))
+ }
+ x.line("}")
+ } else {
+ x.line(varname + " = " + x.genZeroValueR(t))
+ }
+ x.line("} else {")
+ } else {
+ x.line("// cannot be nil")
+ }
+ if t.Kind() != reflect.Ptr {
+ if x.decTryAssignPrimitive(varname, t) {
+ x.line(genTempVarPfx + "v" + i + " := &" + varname)
+ x.dec(genTempVarPfx+"v"+i, t)
+ }
+ } else {
+ x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
+ // Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
+ // There's a chance of a **T in here which is nil.
+ var ptrPfx string
+ for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
+ ptrPfx += "*"
+ x.linef("if %s%s == nil { %s%s = new(%s)}",
+ ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
+ }
+ // if varname has [ in it, then create temp variable for this ptr thingie
+ if strings.Index(varname, "[") >= 0 {
+ varname2 := genTempVarPfx + "w" + i
+ x.line(varname2 + " := " + varname)
+ varname = varname2
+ }
+
+ if ptrPfx == "" {
+ x.dec(varname, t)
+ } else {
+ x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname)
+ x.dec(genTempVarPfx+"z"+i, t)
+ }
+
+ }
+
+ if canBeNil {
+ x.line("} ")
+ }
+}
+
+// dec will decode a variable (varname) of type ptrTo(t).
+// t is always a basetype (i.e. not of kind reflect.Ptr).
+func (x *genRunner) dec(varname string, t reflect.Type) {
+ // assumptions:
+ // - the varname is to a pointer already. No need to take address of it
+ // - t is always a baseType T (not a *T, etc).
+ rtid := reflect.ValueOf(t).Pointer()
+ tptr := reflect.PtrTo(t)
+ if x.checkForSelfer(t, varname) {
+ if t.Implements(selferTyp) || tptr.Implements(selferTyp) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ if _, ok := x.td[rtid]; ok {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.td[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+ mi := x.varsfx()
+ x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
+ x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == rawExtTyp {
+ x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
+ return
+ }
+
+ // HACK: Support for Builtins.
+ // Currently, only Binc supports builtins, and the only builtin type is time.Time.
+ // Have a method that returns the rtid for time.Time if Handle is Binc.
+ if t == timeTyp {
+ vrtid := genTempVarPfx + "m" + x.varsfx()
+ x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+ x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname)
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+ if genImportPath(t) != "" && t.Name() != "" {
+ // first check if extensions are configured, before doing the interface conversion
+ x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+ }
+
+ if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+ x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname)
+ }
+ if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+ x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname)
+ } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+ x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname)
+ }
+
+ x.line("} else {")
+
+ // Since these are pointers, we cannot share, and have to use them one by one
+ switch t.Kind() {
+ case reflect.Int:
+ x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))")
+ // x.line("z.DecInt((*int)(" + varname + "))")
+ case reflect.Int8:
+ x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))")
+ // x.line("z.DecInt8((*int8)(" + varname + "))")
+ case reflect.Int16:
+ x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))")
+ // x.line("z.DecInt16((*int16)(" + varname + "))")
+ case reflect.Int32:
+ x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))")
+ // x.line("z.DecInt32((*int32)(" + varname + "))")
+ case reflect.Int64:
+ x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))")
+ // x.line("z.DecInt64((*int64)(" + varname + "))")
+
+ case reflect.Uint:
+ x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+ // x.line("z.DecUint((*uint)(" + varname + "))")
+ case reflect.Uint8:
+ x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))")
+ // x.line("z.DecUint8((*uint8)(" + varname + "))")
+ case reflect.Uint16:
+ x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))")
+ //x.line("z.DecUint16((*uint16)(" + varname + "))")
+ case reflect.Uint32:
+ x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))")
+ //x.line("z.DecUint32((*uint32)(" + varname + "))")
+ case reflect.Uint64:
+ x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))")
+ //x.line("z.DecUint64((*uint64)(" + varname + "))")
+ case reflect.Uintptr:
+ x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+
+ case reflect.Float32:
+ x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))")
+ //x.line("z.DecFloat32((*float32)(" + varname + "))")
+ case reflect.Float64:
+ x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))")
+ // x.line("z.DecFloat64((*float64)(" + varname + "))")
+
+ case reflect.Bool:
+ x.line("*((*bool)(" + varname + ")) = r.DecodeBool()")
+ // x.line("z.DecBool((*bool)(" + varname + "))")
+ case reflect.String:
+ x.line("*((*string)(" + varname + ")) = r.DecodeString()")
+ // x.line("z.DecString((*string)(" + varname + "))")
+ case reflect.Array, reflect.Chan:
+ x.xtraSM(varname, false, t)
+ // x.decListFallback(varname, rtid, true, t)
+ case reflect.Slice:
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+ } else {
+ x.xtraSM(varname, false, t)
+ // x.decListFallback(varname, rtid, false, t)
+ }
+ case reflect.Map:
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
+ } else {
+ x.xtraSM(varname, false, t)
+ // x.decMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if inlist {
+ x.decStruct(varname, rtid, t)
+ } else {
+ // delete(x.td, rtid)
+ x.line("z.DecFallback(" + varname + ", false)")
+ }
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.DecFallback(" + varname + ", true)")
+ }
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) {
+ // We have to use the actual type name when doing a direct assignment.
+ // We don't have the luxury of casting the pointer to the underlying type.
+ //
+ // Consequently, in the situation of a
+ // type Message int32
+ // var x Message
+ // var i int32 = 32
+ // x = i // this will bomb
+ // x = Message(i) // this will work
+ // *((*int32)(&x)) = i // this will work
+ //
+ // Consequently, we replace:
+ // case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))")
+ // with:
+ // case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))")
+
+ xfn := func(t reflect.Type) string {
+ return x.genTypeNamePrim(t)
+ }
+ switch t.Kind() {
+ case reflect.Int:
+ x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+ case reflect.Int8:
+ x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t))
+ case reflect.Int16:
+ x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t))
+ case reflect.Int32:
+ x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t))
+ case reflect.Int64:
+ x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t))
+
+ case reflect.Uint:
+ x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+ case reflect.Uint8:
+ x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t))
+ case reflect.Uint16:
+ x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t))
+ case reflect.Uint32:
+ x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t))
+ case reflect.Uint64:
+ x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t))
+ case reflect.Uintptr:
+ x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
+
+ case reflect.Float32:
+ x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t))
+ case reflect.Float64:
+ x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t))
+
+ case reflect.Bool:
+ x.linef("%s = %s(r.DecodeBool())", varname, xfn(t))
+ case reflect.String:
+ x.linef("%s = %s(r.DecodeString())", varname, xfn(t))
+ default:
+ tryAsPtr = true
+ }
+ return
+}
+
+func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
+ type tstruc struct {
+ TempVar string
+ Rand string
+ Varname string
+ CTyp string
+ Typ string
+ Immutable bool
+ Size int
+ }
+ telem := t.Elem()
+ ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
+
+ funcs := make(template.FuncMap)
+
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, telem, false)
+ return ""
+ }
+ funcs["decLine"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+ funcs["zero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["isArray"] = func() bool {
+ return t.Kind() == reflect.Array
+ }
+ funcs["isSlice"] = func() bool {
+ return t.Kind() == reflect.Slice
+ }
+ funcs["isChan"] = func() bool {
+ return t.Kind() == reflect.Chan
+ }
+ tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
+ type tstruc struct {
+ TempVar string
+ Sfx string
+ Rand string
+ Varname string
+ KTyp string
+ Typ string
+ Size int
+ }
+ telem := t.Elem()
+ tkey := t.Key()
+ ts := tstruc{
+ genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
+ x.genTypeName(telem), int(telem.Size() + tkey.Size()),
+ }
+
+ funcs := make(template.FuncMap)
+ funcs["decElemZero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["decElemKindImmutable"] = func() bool {
+ return genIsImmutable(telem)
+ }
+ funcs["decElemKindPtr"] = func() bool {
+ return telem.Kind() == reflect.Ptr
+ }
+ funcs["decElemKindIntf"] = func() bool {
+ return telem.Kind() == reflect.Interface
+ }
+ funcs["decLineVarK"] = func(varname string) string {
+ x.decVar(varname, tkey, false)
+ return ""
+ }
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, telem, false)
+ return ""
+ }
+ funcs["decLineK"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
+ return ""
+ }
+ funcs["decLine"] = func(pfx string) string {
+ x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+
+ tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ x.line("switch (" + kName + ") {")
+ for _, si := range tisfi {
+ x.line("case \"" + si.encName + "\":")
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ // we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
+ }
+ x.decVar(varname+"."+t2.Name, t2.Type, false)
+ }
+ x.line("default:")
+ // pass the slice here, so that the string will not escape, and maybe save allocation
+ x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
+ x.line("} // end switch " + kName)
+}
+
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ kName := tpfx + "s" + i
+
+ // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out.
+ // However, using that was more expensive, as it seems that the switch expression
+ // is evaluated each time.
+ //
+ // We could depend on decodeString using a temporary/shared buffer internally.
+ // However, this model of creating a byte array and using it explicitly is faster,
+ // and allows optional use of unsafe []byte->string conversion without alloc.
+
+ // Also, ensure that the slice array doesn't escape.
+ // That will help escape analysis prevent allocation when it gets better.
+
+ // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into")
+ // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into")
+ // use the scratch buffer to avoid allocation (most field names are < 32).
+
+ x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into")
+
+ x.line("_ = " + kName + "Slc")
+ switch style {
+ case genStructMapStyleLenPrefix:
+ x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
+ case genStructMapStyleCheckBreak:
+ x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
+ default: // 0, otherwise.
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
+ x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
+ x.line("} else { if r.CheckBreak() { break }; }")
+ }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
+ x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
+ // let string be scoped to this loop alone, so it doesn't escape.
+ if x.unsafe {
+ x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
+ kName + "Slc[0])), len(" + kName + "Slc)}")
+ x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))")
+ } else {
+ x.line(kName + " := string(" + kName + "Slc)")
+ }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
+ x.decStructMapSwitch(kName, varname, rtid, t)
+
+ x.line("} // end for " + tpfx + "j" + i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
+ x.linef("var %sj%s int", tpfx, i)
+ x.linef("var %sb%s bool", tpfx, i) // break
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ for _, si := range tisfi {
+ var t2 reflect.StructField
+ if si.i != -1 {
+ t2 = t.Field(int(si.i))
+ } else {
+ // we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
+ }
+
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }",
+ tpfx, i, x.xs, breakString)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.decVar(varname+"."+t2.Name, t2.Type, true)
+ }
+ // read remaining values and throw away.
+ x.line("for {")
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { break }", tpfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
+ x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
+ x.line("}")
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+}
+
+func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
+ // if container is map
+ i := x.varsfx()
+ x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+ x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
+ if genUseOneFunctionForDecStructMap {
+ x.line("} else { ")
+ x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
+ } else {
+ x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
+ x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
+ x.line("} else {")
+ x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
+ }
+ x.line("}")
+
+ // else if container is array
+ x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else { ")
+ x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
+ x.line("}")
+ // else panic
+ x.line("} else { ")
+ x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")")
+ x.line("} ")
+}
+
+// --------
+
+type genV struct {
+ // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
+ MapKey string
+ Elem string
+ Primitive string
+ Size int
+}
+
+func (x *genRunner) newGenV(t reflect.Type) (v genV) {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array:
+ te := t.Elem()
+ v.Elem = x.genTypeName(te)
+ v.Size = int(te.Size())
+ case reflect.Map:
+ te, tk := t.Elem(), t.Key()
+ v.Elem = x.genTypeName(te)
+ v.MapKey = x.genTypeName(tk)
+ v.Size = int(te.Size() + tk.Size())
+ default:
+ panic("unexpected type for newGenV. Requires map or slice type")
+ }
+ return
+}
+
+func (x *genV) MethodNamePfx(prefix string, prim bool) string {
+ var name []byte
+ if prefix != "" {
+ name = append(name, prefix...)
+ }
+ if prim {
+ name = append(name, genTitleCaseName(x.Primitive)...)
+ } else {
+ if x.MapKey == "" {
+ name = append(name, "Slice"...)
+ } else {
+ name = append(name, "Map"...)
+ name = append(name, genTitleCaseName(x.MapKey)...)
+ }
+ name = append(name, genTitleCaseName(x.Elem)...)
+ }
+ return string(name)
+
+}
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+ s = t.PkgPath()
+ if genCheckVendor {
+ // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+ // if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+ const vendorStart = "vendor/"
+ const vendorInline = "/vendor/"
+ if i := strings.LastIndex(s, vendorInline); i >= 0 {
+ s = s[i+len(vendorInline):]
+ } else if strings.HasPrefix(s, vendorStart) {
+ s = s[len(vendorStart):]
+ }
+ }
+ return
+}
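+
+// For example (a sketch; the import path below is illustrative): with
+// GO15VENDOREXPERIMENT=1, a type whose PkgPath() is
+// "k8s.io/kubernetes/vendor/github.com/ugorji/go/codec" has its genImportPath
+// reported as "github.com/ugorji/go/codec", because everything up to and
+// including the last "/vendor/" is stripped.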
+
+// A Go identifier is (letter|_)(letter|digit|_)*
+func genGoIdentifier(s string, checkFirstChar bool) string {
+ b := make([]byte, 0, len(s))
+ t := make([]byte, 4)
+ var n int
+ for i, r := range s {
+ if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+ b = append(b, '_')
+ }
+ // r must be unicode_letter, unicode_digit or _
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ n = utf8.EncodeRune(t, r)
+ b = append(b, t[:n]...)
+ } else {
+ b = append(b, '_')
+ }
+ }
+ return string(b)
+}
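+
+// A couple of illustrative inputs (a sketch, not exhaustive):
+//
+//	genGoIdentifier("github.com/ugorji/go", false) // "github_com_ugorji_go"
+//	genGoIdentifier("3d", true)                    // "_3d" (leading '_' added because '3' is not a letter)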
+
+func genNonPtr(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+func genTitleCaseName(s string) string {
+ switch s {
+ case "interface{}":
+ return "Intf"
+ default:
+ return strings.ToUpper(s[0:1]) + s[1:]
+ }
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "Ptrto"
+ t = t.Elem()
+ }
+ tstr := t.String()
+ if tn := t.Name(); tn != "" {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ return ptrPfx + tn
+ } else {
+ if genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Slice:
+ return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+ case reflect.Array:
+ return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Chan:
+ var cx string
+ switch t.ChanDir() {
+ case reflect.SendDir:
+ cx = "ChanSend"
+ case reflect.RecvDir:
+ cx = "ChanRecv"
+ default:
+ cx = "Chan"
+ }
+ return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+ default:
+ if t == intfTyp {
+ return ptrPfx + "Interface"
+ } else {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ if t.Name() != "" {
+ return ptrPfx + t.Name()
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ } else {
+ // best way to get the package name inclusive
+ // return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+ if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ }
+}
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+ len2 := genBase64enc.EncodedLen(len(tstr))
+ bufx := make([]byte, len2)
+ genBase64enc.Encode(bufx, []byte(tstr))
+ for i := len2 - 1; i >= 0; i-- {
+ if bufx[i] == '=' {
+ len2--
+ } else {
+ break
+ }
+ }
+ return string(bufx[:len2])
+}
+
+func genIsImmutable(t reflect.Type) (v bool) {
+ return isImmutableKind(t.Kind())
+}
+
+type genInternal struct {
+ Values []genV
+ Unsafe bool
+}
+
+func (x genInternal) FastpathLen() (l int) {
+ for _, v := range x.Values {
+ if v.Primitive == "" {
+ l++
+ }
+ }
+ return
+}
+
+func genInternalZeroValue(s string) string {
+ switch s {
+ case "interface{}":
+ return "nil"
+ case "bool":
+ return "false"
+ case "string":
+ return `""`
+ default:
+ return "0"
+ }
+}
+
+func genInternalEncCommandAsString(s string, vname string) string {
+ switch s {
+ case "uint", "uint8", "uint16", "uint32", "uint64":
+ return "ee.EncodeUint(uint64(" + vname + "))"
+ case "int", "int8", "int16", "int32", "int64":
+ return "ee.EncodeInt(int64(" + vname + "))"
+ case "string":
+ return "ee.EncodeString(c_UTF8, " + vname + ")"
+ case "float32":
+ return "ee.EncodeFloat32(" + vname + ")"
+ case "float64":
+ return "ee.EncodeFloat64(" + vname + ")"
+ case "bool":
+ return "ee.EncodeBool(" + vname + ")"
+ case "symbol":
+ return "ee.EncodeSymbol(" + vname + ")"
+ default:
+ return "e.encode(" + vname + ")"
+ }
+}
+
+func genInternalDecCommandAsString(s string) string {
+ switch s {
+ case "uint":
+ return "uint(dd.DecodeUint(uintBitsize))"
+ case "uint8":
+ return "uint8(dd.DecodeUint(8))"
+ case "uint16":
+ return "uint16(dd.DecodeUint(16))"
+ case "uint32":
+ return "uint32(dd.DecodeUint(32))"
+ case "uint64":
+ return "dd.DecodeUint(64)"
+ case "uintptr":
+ return "uintptr(dd.DecodeUint(uintBitsize))"
+ case "int":
+ return "int(dd.DecodeInt(intBitsize))"
+ case "int8":
+ return "int8(dd.DecodeInt(8))"
+ case "int16":
+ return "int16(dd.DecodeInt(16))"
+ case "int32":
+ return "int32(dd.DecodeInt(32))"
+ case "int64":
+ return "dd.DecodeInt(64)"
+
+ case "string":
+ return "dd.DecodeString()"
+ case "float32":
+ return "float32(dd.DecodeFloat(true))"
+ case "float64":
+ return "dd.DecodeFloat(false)"
+ case "bool":
+ return "dd.DecodeBool()"
+ default:
+ panic(errors.New("gen internal: unknown type for decode: " + s))
+ }
+}
+
+func genInternalSortType(s string, elem bool) string {
+ for _, v := range [...]string{"int", "uint", "float", "bool", "string"} {
+ if strings.HasPrefix(s, v) {
+ if elem {
+ if v == "int" || v == "uint" || v == "float" {
+ return v + "64"
+ } else {
+ return v
+ }
+ }
+ return v + "Slice"
+ }
+ }
+ panic("sorttype: unexpected type: " + s)
+}
+
+// var genInternalMu sync.Mutex
+var genInternalV genInternal
+var genInternalTmplFuncs template.FuncMap
+var genInternalOnce sync.Once
+
+func genInternalInit() {
+ types := [...]string{
+ "interface{}",
+ "string",
+ "float32",
+ "float64",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "bool",
+ }
+ // keep as slice, so it is in specific iteration order.
+ // Initial order was uint64, string, interface{}, int, int64
+ mapvaltypes := [...]string{
+ "interface{}",
+ "string",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ "bool",
+ }
+ wordSizeBytes := int(intBitsize) / 8
+
+ mapvaltypes2 := map[string]int{
+ "interface{}": 2 * wordSizeBytes,
+ "string": 2 * wordSizeBytes,
+ "uint": 1 * wordSizeBytes,
+ "uint8": 1,
+ "uint16": 2,
+ "uint32": 4,
+ "uint64": 8,
+ "uintptr": 1 * wordSizeBytes,
+ "int": 1 * wordSizeBytes,
+ "int8": 1,
+ "int16": 2,
+ "int32": 4,
+ "int64": 8,
+ "float32": 4,
+ "float64": 8,
+ "bool": 1,
+ }
+ var gt genInternal
+
+	// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+ for _, s := range types {
+ gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
+		if s != "uint8" { // do not generate fast path for slice of bytes. It is already treated specially.
+ gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+ }
+ if _, ok := mapvaltypes2[s]; !ok {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
+ }
+ for _, ms := range mapvaltypes {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
+ }
+ }
+
+ funcs := make(template.FuncMap)
+ // funcs["haspfx"] = strings.HasPrefix
+ funcs["encmd"] = genInternalEncCommandAsString
+ funcs["decmd"] = genInternalDecCommandAsString
+ funcs["zerocmd"] = genInternalZeroValue
+ funcs["hasprefix"] = strings.HasPrefix
+ funcs["sorttype"] = genInternalSortType
+
+ genInternalV = gt
+ genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It is run by the program author alone.
+// Unfortunately, it has to be exported so that it can be called from a command line tool.
+// *** DO NOT USE ***
+func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) {
+ genInternalOnce.Do(genInternalInit)
+
+ gt := genInternalV
+ gt.Unsafe = !safe
+
+ t := template.New("").Funcs(genInternalTmplFuncs)
+
+ tmplstr, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+
+ if t, err = t.Parse(string(tmplstr)); err != nil {
+ return
+ }
+
+ var out bytes.Buffer
+ err = t.Execute(&out, gt)
+ if err != nil {
+ return
+ }
+
+ bout, err := format.Source(out.Bytes())
+ if err != nil {
+ w.Write(out.Bytes()) // write out if error, so we can still see.
+ // w.Write(bout) // write out if error, as much as possible, so we can still see.
+ return
+ }
+ w.Write(bout)
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 0000000..40065a0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,1271 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and gives the best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate through
+// the container. That decision is based on whether the container is text-based (with
+// separators) or binary (without separators). If binary, we do not even call the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to iterate
+// through the containers. That decision is based on whether the stream contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length.
+// It may suffer because we treat it like a text-based codec, and read separators.
+// However, this read is a no-op and the cost is insignificant.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+// - If struct, update fields from stream into fields of struct.
+// If field in stream not found in struct, handle appropriately (based on option).
+// If a struct field has no corresponding value in the stream, leave it AS IS.
+// If nil in stream, set value to nil/zero value.
+// - If map, update map from stream.
+// If the stream value is NIL, set the map to nil.
+// - If slice, try to update up to the length of the array in the stream.
+//   If the container length is less than the stream array length,
+//   and the container cannot be expanded, handle appropriately (based on option).
+//   This means you can decode a 4-element stream array into a 1-element array.
+//
+// ------------------------------------
+// On encode, a user can specify omitEmpty. This means that the value will be omitted
+// if it is the zero value. The problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
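+//
+// For example (a minimal sketch; the type and tag are illustrative):
+//
+//	type T struct{ A int `codec:"a,omitempty"` }
+//	t := T{A: 5}
+//	// if "a" was omitted on encode (because it was zero in the source value),
+//	// decoding that stream into &t leaves t.A == 5, not 0.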
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+// map
+// - first collect all keys (e.g. in k1)
+// - for each key in stream, mark k1 that the key should not be removed
+// - after updating map, do second pass and call delete for all keys in k1 which are not marked
+// struct:
+// - for each field, track the *typeInfo s1
+// - iterate through all s1, and for each one not marked, set value to zero
+// - this involves checking the possible anonymous fields which are nil ptrs.
+// too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder:
+//  - once it has its err field set, it cannot be used again.
+//  - panicking will be optional, controlled by const flag.
+// - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ scratchByteArrayLen = 32
+ initCollectionCap = 32 // 32 is defensive. 16 is preferred.
+
+ // Support encoding.(Binary|Text)(Unm|M)arshaler.
+ // This constant flag will enable or disable it.
+ supportMarshalInterfaces = true
+
+ // Each Encoder or Decoder uses a cache of functions based on conditionals,
+ // so that the conditionals are not run every time.
+ //
+ // Either a map or a slice is used to keep track of the functions.
+ // The map is more natural, but has a higher cost than a slice/array.
+ // This flag (useMapForCodecCache) controls which is used.
+ //
+ // From benchmarks, slices with linear search perform better with < 32 entries.
+ // We have typically seen a high threshold of about 24 entries.
+ useMapForCodecCache = false
+
+ // for debugging, set this to false, to catch panic traces.
+ // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+ recoverPanicToErr = true
+
+ // Fast path functions try to create a fast path encode or decode implementation
+	// for common maps and slices, by bypassing reflection altogether.
+ fastpathEnabled = true
+
+ // if checkStructForEmptyValue, check structs fields to see if an empty value.
+ // This could be an expensive call, so possibly disable it.
+ checkStructForEmptyValue = false
+
+ // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
+ derefForIsEmptyValue = false
+
+ // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
+	// The only concern is that, if the slice already contained some garbage, we will decode into that garbage.
+ // The chances of this are slim, so leave this "optimization".
+ // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
+ resetSliceElemToZeroValue bool = false
+)
+
+var (
+ oneByteArr = [1]byte{0}
+ zeroByteSlice = oneByteArr[:0:0]
+)
+
+type charEncoding uint8
+
+const (
+ c_RAW charEncoding = iota
+ c_UTF8
+ c_UTF16LE
+ c_UTF16BE
+ c_UTF32LE
+ c_UTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTimestamp
+ valueTypeExt
+
+ // valueTypeInvalid = 0xff
+)
+
+type seqType uint8
+
+const (
+ _ seqType = iota
+ seqTypeArray
+ seqTypeSlice
+ seqTypeChan
+)
+
+// Note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart methods already handle these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart // slot left open, since Driver method already covers it
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart // slot left open, since Driver methods already cover it
+ containerArrayElem
+ containerArrayEnd
+)
+
+type rgetPoolT struct {
+ encNames [8]string
+ fNames [8]string
+ etypes [8]uintptr
+ sfis [8]*structFieldInfo
+}
+
+var rgetPool = sync.Pool{
+ New: func() interface{} { return new(rgetPoolT) },
+}
+
+type rgetT struct {
+ fNames []string
+ encNames []string
+ etypes []uintptr
+ sfis []*structFieldInfo
+}
+
+type containerStateRecv interface {
+ sendContainerState(containerState)
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+type jsonMarshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+var (
+ bigen = binary.BigEndian
+ structInfoFieldName = "_struct"
+
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
+
+ stringTyp = reflect.TypeOf("")
+ timeTyp = reflect.TypeOf(time.Time{})
+ rawExtTyp = reflect.TypeOf(RawExt{})
+ uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+
+ mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+ binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+ textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+ jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+ jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+
+ selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
+
+ uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
+ rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
+ intfTypId = reflect.ValueOf(intfTyp).Pointer()
+ timeTypId = reflect.ValueOf(timeTyp).Pointer()
+ stringTypId = reflect.ValueOf(stringTyp).Pointer()
+
+ mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
+ mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
+ intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
+ // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
+
+ intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
+ uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
+
+ bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ chkOvf checkOverflow
+
+ noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo")
+)
+
+var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+type Selfer interface {
+ CodecEncodeSelf(*Encoder)
+ CodecDecodeSelf(*Decoder)
+}
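+
+// A minimal sketch of a hand-written Selfer (the Celsius type is illustrative,
+// and MustEncode/MustDecode are assumed to be the panic-on-error helpers):
+//
+//	type Celsius float64
+//
+//	func (c *Celsius) CodecEncodeSelf(e *Encoder) { e.MustEncode(float64(*c)) }
+//	func (c *Celsius) CodecDecodeSelf(d *Decoder) { d.MustDecode((*float64)(c)) }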
+
+// MapBySlice represents a slice which should be encoded as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// The support of MapBySlice affords the following:
+// - A slice type which implements MapBySlice will be encoded as a map
+// - A slice can be decoded from a map in the stream
+type MapBySlice interface {
+ MapBySlice()
+}
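+
+// A minimal sketch (the type name is illustrative): a slice of alternating
+// keys and values, e.g. Pairs{"a", 1, "b", 2}, is encoded as the map {"a": 1, "b": 2}.
+//
+//	type Pairs []interface{}
+//
+//	func (Pairs) MapBySlice() {}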
+
+// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+//
+// BasicHandle encapsulates the common options and extension functions.
+type BasicHandle struct {
+ // TypeInfos is used to get the type info for any type.
+ //
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+ TypeInfos *TypeInfos
+
+ extHandle
+ EncodeOptions
+ DecodeOptions
+}
+
+func (x *BasicHandle) getBasicHandle() *BasicHandle {
+ return x
+}
+
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ if x.TypeInfos != nil {
+ return x.TypeInfos.get(rtid, rt)
+ }
+ return defTypeInfos.get(rtid, rt)
+}
+
+// Handle is the interface for a specific encoding format.
+//
+// Typically, a Handle is pre-configured before first time use,
+// and not modified while in use. Such a pre-configured Handle
+// is safe for concurrent access.
+type Handle interface {
+ getBasicHandle() *BasicHandle
+ newEncDriver(w *Encoder) encDriver
+ newDecDriver(r *Decoder) decDriver
+ isBinary() bool
+}
+
+// RawExt represents raw unprocessed extension data.
+// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
+//
+// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value.
+type RawExt struct {
+ Tag uint64
+ // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value.
+ // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
+ Data []byte
+ // Value represents the extension, if Data is nil.
+ // Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
+ Value interface{}
+}
+
+// BytesExt handles custom (de)serialization of types to/from []byte.
+// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
+type BytesExt interface {
+ // WriteExt converts a value to a []byte.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
+ WriteExt(v interface{}) []byte
+
+ // ReadExt updates a value from a []byte.
+ ReadExt(dst interface{}, src []byte)
+}
+
+// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
+// The Encoder or Decoder will then handle the further (de)serialization of that known type.
+//
+// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
+type InterfaceExt interface {
+ // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
+ ConvertExt(v interface{}) interface{}
+
+ // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
+ UpdateExt(dst interface{}, src interface{})
+}
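+
+// A minimal sketch of an InterfaceExt (the Point type, the tag value, and the
+// int64 element type seen on decode are all assumptions), registered via a
+// handle's SetInterfaceExt method:
+//
+//	type Point struct{ X, Y int }
+//	type pointExt struct{}
+//
+//	func (pointExt) ConvertExt(v interface{}) interface{} {
+//		p := v.(*Point) // Point is a struct, so v is typically passed as *Point
+//		return []interface{}{p.X, p.Y}
+//	}
+//
+//	func (pointExt) UpdateExt(dst interface{}, src interface{}) {
+//		p, s := dst.(*Point), src.([]interface{})
+//		p.X, p.Y = int(s[0].(int64)), int(s[1].(int64))
+//	}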
+
+// Ext handles custom (de)serialization of custom types / extensions.
+type Ext interface {
+ BytesExt
+ InterfaceExt
+}
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported method.
+type addExtWrapper struct {
+ encFn func(reflect.Value) ([]byte, error)
+ decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+ bs, err := x.encFn(reflect.ValueOf(v))
+ if err != nil {
+ panic(err)
+ }
+ return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+ panic(err)
+ }
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ x.ReadExt(dest, v.([]byte))
+}
+
+type setExtWrapper struct {
+ b BytesExt
+ i InterfaceExt
+}
+
+func (x *setExtWrapper) WriteExt(v interface{}) []byte {
+ if x.b == nil {
+ panic("BytesExt.WriteExt is not supported")
+ }
+ return x.b.WriteExt(v)
+}
+
+func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if x.b == nil {
+		panic("BytesExt.ReadExt is not supported")
+	}
+ x.b.ReadExt(v, bs)
+}
+
+func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
+ if x.i == nil {
+ panic("InterfaceExt.ConvertExt is not supported")
+	}
+ return x.i.ConvertExt(v)
+}
+
+func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ if x.i == nil {
+		panic("InterfaceExt.UpdateExt is not supported")
+	}
+ x.i.UpdateExt(dest, v)
+}
+
+// type errorString string
+// func (x errorString) Error() string { return string(x) }
+
+type binaryEncodingType struct{}
+
+func (_ binaryEncodingType) isBinary() bool { return true }
+
+type textEncodingType struct{}
+
+func (_ textEncodingType) isBinary() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+type noBuiltInTypes struct{}
+
+func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false }
+func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+type noStreamingCodec struct{}
+
+func (_ noStreamingCodec) CheckBreak() bool { return false }
+
+// bigenHelper.
+// Users must already slice the x completely, because we will not reslice.
+type bigenHelper struct {
+ x []byte // must be correctly sliced to appropriate len. slicing is a cost.
+ w encWriter
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+ bigen.PutUint16(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+ bigen.PutUint32(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+ bigen.PutUint64(z.x, v)
+ z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+ rtid uintptr
+ rt reflect.Type
+ tag uint64
+ ext Ext
+}
+
+type extHandle []extTypeTagFn
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// AddExt registers an encode and decode function for a reflect.Type.
+// AddExt internally calls SetExt.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+func (o *extHandle) AddExt(
+ rt reflect.Type, tag byte,
+ encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error,
+) (err error) {
+ if encfn == nil || decfn == nil {
+ return o.SetExt(rt, uint64(tag), nil)
+ }
+ return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// Note that the type must be a named type, and specifically not
+// a pointer or interface. An error is returned if that is not honored.
+//
+// To deregister an ext, call SetExt with a nil Ext.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+ // o is a pointer, because we may need to initialize it
+ if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
+ err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
+ reflect.Zero(rt).Interface())
+ return
+ }
+
+ rtid := reflect.ValueOf(rt).Pointer()
+ for _, v := range *o {
+ if v.rtid == rtid {
+ v.tag, v.ext = tag, ext
+ return
+ }
+ }
+
+ if *o == nil {
+ *o = make([]extTypeTagFn, 0, 4)
+ }
+ *o = append(*o, extTypeTagFn{rtid, rt, tag, ext})
+ return
+}
+
+func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
+ if v.rtid == rtid {
+ return v
+ }
+ }
+ return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
+ if v.tag == tag {
+ return v
+ }
+ }
+ return nil
+}
+
+type structFieldInfo struct {
+ encName string // encode name
+
+ // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
+
+ is []int // (recursive/embedded) field index in struct
+ i int16 // field index in struct
+ omitEmpty bool
+ toArray bool // if field is _struct, is the toArray set?
+}
+
+// func (si *structFieldInfo) isZero() bool {
+// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray
+// }
+
+// field returns the field of the struct.
+// If an embedded field is reached through a nil pointer and update is false,
+// it returns an invalid (zero) reflect.Value.
+func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) {
+ if si.i != -1 {
+ v = v.Field(int(si.i))
+ return v
+ }
+ // replicate FieldByIndex
+ for _, x := range si.is {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ if !update {
+ return
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
+ if si.i != -1 {
+ v = v.Field(int(si.i))
+ v.Set(reflect.Zero(v.Type()))
+ // v.Set(reflect.New(v.Type()).Elem())
+ // v.Set(reflect.New(v.Type()))
+ } else {
+ // replicate FieldByIndex
+ for _, x := range si.is {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ }
+ v = v.Field(x)
+ }
+ v.Set(reflect.Zero(v.Type()))
+ }
+}
+
+func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
+ // if fname == "" {
+ // panic(noFieldNameToStructFieldInfoErr)
+ // }
+ si := structFieldInfo{
+ encName: fname,
+ }
+
+ if stag != "" {
+ for i, s := range strings.Split(stag, ",") {
+ if i == 0 {
+ if s != "" {
+ si.encName = s
+ }
+ } else {
+ if s == "omitempty" {
+ si.omitEmpty = true
+ } else if s == "toarray" {
+ si.toArray = true
+ }
+ }
+ }
+ }
+ // si.encNameBs = []byte(si.encName)
+ return &si
+}
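+
+// A few illustrative inputs (a sketch):
+//
+//	parseStructFieldInfo("Name", "name,omitempty") // encName "name", omitEmpty true
+//	parseStructFieldInfo("Kind", ",toarray")       // encName "Kind", toArray true
+//	parseStructFieldInfo("Size", "")               // encName "Size"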
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int {
+ return len(p)
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool {
+ return p[i].encName < p[j].encName
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+// typeInfo keeps information about each type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+// - If base is a built in type, en/decode base value
+// - If base is registered as an extension, en/decode base value
+// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
+// - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+ sfi []*structFieldInfo // sorted. Used when enc/dec struct to map.
+ sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
+
+ rt reflect.Type
+ rtid uintptr
+
+ numMeth uint16 // number of methods
+
+	// baseId gives the pointer to the base reflect.Type, after dereferencing
+ // the pointers. E.g. base type of ***time.Time is time.Time.
+ base reflect.Type
+ baseId uintptr
+ baseIndir int8 // number of indirections to get to base
+
+ mbs bool // base type (T or *T) is a MapBySlice
+
+ bm bool // base type (T or *T) is a binaryMarshaler
+ bunm bool // base type (T or *T) is a binaryUnmarshaler
+ bmIndir int8 // number of indirections to get to binaryMarshaler type
+ bunmIndir int8 // number of indirections to get to binaryUnmarshaler type
+
+ tm bool // base type (T or *T) is a textMarshaler
+ tunm bool // base type (T or *T) is a textUnmarshaler
+ tmIndir int8 // number of indirections to get to textMarshaler type
+ tunmIndir int8 // number of indirections to get to textUnmarshaler type
+
+ jm bool // base type (T or *T) is a jsonMarshaler
+ junm bool // base type (T or *T) is a jsonUnmarshaler
+ jmIndir int8 // number of indirections to get to jsonMarshaler type
+ junmIndir int8 // number of indirections to get to jsonUnmarshaler type
+
+ cs bool // base type (T or *T) is a Selfer
+ csIndir int8 // number of indirections to get to Selfer type
+
+ toArray bool // whether this (struct) type should be encoded as an array
+}
+
+func (ti *typeInfo) indexForEncName(name string) int {
+ //tisfi := ti.sfi
+ const binarySearchThreshold = 16
+ if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
+ // linear search. faster than binary search in my testing up to 16-field structs.
+ for i, si := range ti.sfi {
+ if si.encName == name {
+ return i
+ }
+ }
+ } else {
+ // binary search. adapted from sort/search.go.
+ h, i, j := 0, 0, sfilen
+ for i < j {
+ h = i + (j-i)/2
+ if ti.sfi[h].encName < name {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < sfilen && ti.sfi[i].encName == name {
+ return i
+ }
+ }
+ return -1
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+ infos map[uintptr]*typeInfo
+ mu sync.RWMutex
+ tags []string
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
+//
+// This allows users to customize the struct tag keys which contain the configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+ return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
+}
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+ // check for tags: codec, json, in that order.
+ // this allows seamless support for many configured structs.
+ for _, x := range x.tags {
+ s = t.Get(x)
+ if s != "" {
+ return s
+ }
+ }
+ return
+}
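+
+// For example (a sketch; the struct is illustrative): with the default tag
+// keys {"codec", "json"},
+//
+//	type T struct {
+//		FieldA string `codec:"a,omitempty" json:"field_a"`
+//	}
+//
+// structTag returns "a,omitempty" for FieldA, because "codec" is checked before "json".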
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ var ok bool
+ x.mu.RLock()
+ pti, ok = x.infos[rtid]
+ x.mu.RUnlock()
+ if ok {
+ return
+ }
+
+ // do not hold lock while computing this.
+ // it may lead to duplication, but that's ok.
+ ti := typeInfo{rt: rt, rtid: rtid}
+ ti.numMeth = uint16(rt.NumMethod())
+
+ var indir int8
+ if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
+ ti.bm, ti.bmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
+ ti.bunm, ti.bunmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, textMarshalerTyp); ok {
+ ti.tm, ti.tmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
+ ti.tunm, ti.tunmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok {
+ ti.jm, ti.jmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok {
+ ti.junm, ti.junmIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, selferTyp); ok {
+ ti.cs, ti.csIndir = true, indir
+ }
+ if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
+ ti.mbs = true
+ }
+
+ pt := rt
+ var ptIndir int8
+ // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
+ for pt.Kind() == reflect.Ptr {
+ pt = pt.Elem()
+ ptIndir++
+ }
+ if ptIndir == 0 {
+ ti.base = rt
+ ti.baseId = rtid
+ } else {
+ ti.base = pt
+ ti.baseId = reflect.ValueOf(pt).Pointer()
+ ti.baseIndir = ptIndir
+ }
+
+ if rt.Kind() == reflect.Struct {
+ var siInfo *structFieldInfo
+ if f, ok := rt.FieldByName(structInfoFieldName); ok {
+ siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
+ ti.toArray = siInfo.toArray
+ }
+ pi := rgetPool.Get()
+ pv := pi.(*rgetPoolT)
+ pv.etypes[0] = ti.baseId
+ vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
+ x.rget(rt, rtid, nil, &vv, siInfo)
+ ti.sfip = make([]*structFieldInfo, len(vv.sfis))
+ ti.sfi = make([]*structFieldInfo, len(vv.sfis))
+ copy(ti.sfip, vv.sfis)
+ sort.Sort(sfiSortedByEncName(vv.sfis))
+ copy(ti.sfi, vv.sfis)
+ rgetPool.Put(pi)
+ }
+ // sfi = sfip
+
+ x.mu.Lock()
+ if pti, ok = x.infos[rtid]; !ok {
+ pti = &ti
+ x.infos[rtid] = pti
+ }
+ x.mu.Unlock()
+ return
+}
+
+func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
+ indexstack []int, pv *rgetT, siInfo *structFieldInfo,
+) {
+	// This reads the struct fields and stores how to access each value.
+	// It uses the Go language's rules for embedding, as below:
+ // - if a field has been seen while traversing, skip it
+ // - if an encName has been seen while traversing, skip it
+ // - if an embedded type has been seen, skip it
+ //
+ // Also, per Go's rules, embedded fields must be analyzed AFTER all top-level fields.
+ //
+ // Note: we consciously use slices, not a map, to simulate a set.
+	// Typically, types have < 16 fields, and iteration using equality checks is faster than map lookups at that size.
+
+ type anonField struct {
+ ft reflect.Type
+ idx int
+ }
+
+ var anonFields []anonField
+
+LOOP:
+ for j, jlen := 0, rt.NumField(); j < jlen; j++ {
+ f := rt.Field(j)
+ fkind := f.Type.Kind()
+ // skip if a func type, or is unexported, or structTag value == "-"
+ switch fkind {
+ case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
+ continue LOOP
+ }
+
+ // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
+ if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
+ continue
+ }
+ stag := x.structTag(f.Tag)
+ if stag == "-" {
+ continue
+ }
+ var si *structFieldInfo
+ // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
+ if f.Anonymous && fkind != reflect.Interface {
+ doInline := stag == ""
+ if !doInline {
+ si = parseStructFieldInfo("", stag)
+ doInline = si.encName == ""
+ // doInline = si.isZero()
+ }
+ if doInline {
+ ft := f.Type
+ for ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if ft.Kind() == reflect.Struct {
+ // handle anonymous fields after handling all the non-anon fields
+ anonFields = append(anonFields, anonField{ft, j})
+ continue
+ }
+ }
+ }
+
+ // after the anonymous dance: if an unexported field, skip
+ if f.PkgPath != "" { // unexported
+ continue
+ }
+
+ if f.Name == "" {
+ panic(noFieldNameToStructFieldInfoErr)
+ }
+
+ for _, k := range pv.fNames {
+ if k == f.Name {
+ continue LOOP
+ }
+ }
+ pv.fNames = append(pv.fNames, f.Name)
+
+ if si == nil {
+ si = parseStructFieldInfo(f.Name, stag)
+ } else if si.encName == "" {
+ si.encName = f.Name
+ }
+
+ for _, k := range pv.encNames {
+ if k == si.encName {
+ continue LOOP
+ }
+ }
+ pv.encNames = append(pv.encNames, si.encName)
+
+ // si.ikind = int(f.Type.Kind())
+ if len(indexstack) == 0 {
+ si.i = int16(j)
+ } else {
+ si.i = -1
+ si.is = make([]int, len(indexstack)+1)
+ copy(si.is, indexstack)
+ si.is[len(indexstack)] = j
+ // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ }
+
+ if siInfo != nil {
+ if siInfo.omitEmpty {
+ si.omitEmpty = true
+ }
+ }
+ pv.sfis = append(pv.sfis, si)
+ }
+
+ // now handle anonymous fields
+LOOP2:
+ for _, af := range anonFields {
+ // if etypes contains this, then do not call rget again (as the fields are already seen here)
+ ftid := reflect.ValueOf(af.ft).Pointer()
+ for _, k := range pv.etypes {
+ if k == ftid {
+ continue LOOP2
+ }
+ }
+ pv.etypes = append(pv.etypes, ftid)
+
+ indexstack2 := make([]int, len(indexstack)+1)
+ copy(indexstack2, indexstack)
+ indexstack2[len(indexstack)] = af.idx
+ // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ x.rget(af.ft, ftid, indexstack2, pv, siInfo)
+ }
+}
+
+func panicToErr(err *error) {
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ //debug.PrintStack()
+ panicValToErr(x, err)
+ }
+ }
+}
+
+// func doPanic(tag string, format string, params ...interface{}) {
+// params2 := make([]interface{}, len(params)+1)
+// params2[0] = tag
+// copy(params2[1:], params)
+// panic(fmt.Errorf("%s: "+format, params2...))
+// }
+
+func isImmutableKind(k reflect.Kind) (v bool) {
+ return false ||
+ k == reflect.Int ||
+ k == reflect.Int8 ||
+ k == reflect.Int16 ||
+ k == reflect.Int32 ||
+ k == reflect.Int64 ||
+ k == reflect.Uint ||
+ k == reflect.Uint8 ||
+ k == reflect.Uint16 ||
+ k == reflect.Uint32 ||
+ k == reflect.Uint64 ||
+ k == reflect.Uintptr ||
+ k == reflect.Float32 ||
+ k == reflect.Float64 ||
+ k == reflect.Bool ||
+ k == reflect.String
+}
+
+// these functions must be inlinable, and not call anybody
+type checkOverflow struct{}
+
+func (_ checkOverflow) Float32(f float64) (overflow bool) {
+ if f < 0 {
+ f = -f
+ }
+ return math.MaxFloat32 < f && f <= math.MaxFloat64
+}
+
+func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
+
+func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
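+
+// A few illustrative checks (a sketch):
+//
+//	chkOvf.Int(127, 8)  // false: 127 fits in 8 bits
+//	chkOvf.Int(300, 8)  // true:  300 truncated to 8 bits sign-extends to 44
+//	chkOvf.Uint(256, 8) // true:  the max uint8 is 255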
+
+func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) {
+ //e.g. -127 to 128 for int8
+ pos := (v >> 63) == 0
+ ui2 := v & 0x7fffffffffffffff
+ if pos {
+ if ui2 > math.MaxInt64 {
+ overflow = true
+ return
+ }
+ } else {
+ if ui2 > math.MaxInt64-1 {
+ overflow = true
+ return
+ }
+ }
+ i = int64(v)
+ return
+}
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+type bytesSlice [][]byte
+
+func (p intSlice) Len() int { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintSlice) Len() int { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+ return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
+}
+func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringSlice) Len() int { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesSlice) Len() int { return len(p) }
+func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
+func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolSlice) Len() int { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ---------------------
+
+type intRv struct {
+ v int64
+ r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+ v uint64
+ r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+ v float64
+ r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+ v bool
+ r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+ v string
+ r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+ v []byte
+ r reflect.Value
+}
+type bytesRvSlice []bytesRv
+
+func (p intRvSlice) Len() int { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintRvSlice) Len() int { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+ return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringRvSlice) Len() int { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesRvSlice) Len() int { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolRvSlice) Len() int { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
+func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type bytesI struct {
+ v []byte
+ i interface{}
+}
+
+type bytesISlice []bytesI
+
+func (p bytesISlice) Len() int { return len(p) }
+func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type set []uintptr
+
+func (s *set) add(v uintptr) (exists bool) {
+ // e.ci is always nil, or len >= 1
+ // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }()
+ x := *s
+ if x == nil {
+ x = make([]uintptr, 1, 8)
+ x[0] = v
+ *s = x
+ return
+ }
+	// typically, length will be 1, so make this case fast.
+ if len(x) == 1 {
+ if j := x[0]; j == 0 {
+ x[0] = v
+ } else if j == v {
+ exists = true
+ } else {
+ x = append(x, v)
+ *s = x
+ }
+ return
+ }
+ // check if it exists
+ for _, j := range x {
+ if j == v {
+ exists = true
+ return
+ }
+ }
+ // try to replace a "deleted" slot
+ for i, j := range x {
+ if j == 0 {
+ x[i] = v
+ return
+ }
+ }
+ // if unable to replace deleted slot, just append it.
+ x = append(x, v)
+ *s = x
+ return
+}
+
+func (s *set) remove(v uintptr) (exists bool) {
+ // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }()
+ x := *s
+ if len(x) == 0 {
+ return
+ }
+ if len(x) == 1 {
+ if x[0] == v {
+ x[0] = 0
+ }
+ return
+ }
+ for i, j := range x {
+ if j == v {
+ exists = true
+ x[i] = 0 // set it to 0, as way to delete it.
+ // copy(x[i:], x[i+1:])
+ // x = x[:len(x)-1]
+ return
+ }
+ }
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 0000000..dea981f
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to different environment is easy (just update functions).
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+func panicValToErr(panicVal interface{}, err *error) {
+ if panicVal == nil {
+ return
+ }
+ // case nil
+ switch xerr := panicVal.(type) {
+ case error:
+ *err = xerr
+ case string:
+ *err = errors.New(xerr)
+ default:
+ *err = fmt.Errorf("%v", panicVal)
+ }
+ return
+}
+
+func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
+ switch v.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if deref {
+ if v.IsNil() {
+ return true
+ }
+ return hIsEmptyValue(v.Elem(), deref, checkStruct)
+ } else {
+ return v.IsNil()
+ }
+ case reflect.Struct:
+ if !checkStruct {
+ return false
+ }
+ // return true if all fields are empty. else return false.
+ // we cannot use equality check, because some fields may be maps/slices/etc
+ // and consequently the structs are not comparable.
+ // return v.Interface() == reflect.Zero(v.Type()).Interface()
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
+}
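+
+// With the default flags (derefForIsEmptyValue and checkStructForEmptyValue
+// both false), some illustrative results (a sketch):
+//
+//	isEmptyValue(reflect.ValueOf(0))                 // true
+//	isEmptyValue(reflect.ValueOf(""))                // true
+//	isEmptyValue(reflect.ValueOf([]int(nil)))        // true (length 0)
+//	isEmptyValue(reflect.ValueOf((*int)(nil)))       // true (nil pointer, not dereferenced)
+//	isEmptyValue(reflect.ValueOf(struct{ A int }{})) // false (structs are not inspected)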
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+ if len(v) < 2 {
+ } else if pos && v[0] == 0 {
+ for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+ }
+ } else if !pos && v[0] == 0xff {
+ for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+ }
+ }
+ return
+}
+
+func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
+ if typ == nil {
+ return
+ }
+ rt := typ
+ // The type might be a pointer and we need to keep
+ // dereferencing to the base type until we find an implementation.
+ for {
+ if rt.Implements(iTyp) {
+ return true, indir
+ }
+ if p := rt; p.Kind() == reflect.Ptr {
+ indir++
+ if indir >= math.MaxInt8 { // insane number of indirections
+ return false, 0
+ }
+ rt = p.Elem()
+ continue
+ }
+ break
+ }
+ // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
+ if typ.Kind() != reflect.Ptr {
+ // Not a pointer, but does the pointer work?
+ if reflect.PtrTo(typ).Implements(iTyp) {
+ return true, -1
+ }
+ }
+ return false, 0
+}
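+
+// For a hypothetical type T whose MarshalBinary method has a pointer receiver
+// (a sketch; T is illustrative):
+//
+//	implementsIntf(reflect.TypeOf(T{}), binaryMarshalerTyp)  // (true, -1): only *T implements it
+//	implementsIntf(reflect.TypeOf(&T{}), binaryMarshalerTyp) // (true, 0)
+//	implementsIntf(reflect.TypeOf(1), binaryMarshalerTyp)    // (false, 0)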
+
+// validate that this function is correct ...
+// culled from OGRE (Object-Oriented Graphics Rendering Engine)
+// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
+func halfFloatToFloatBits(yy uint16) (d uint32) {
+ y := uint32(yy)
+ s := (y >> 15) & 0x01
+ e := (y >> 10) & 0x1f
+ m := y & 0x03ff
+
+ if e == 0 {
+		if m == 0 { // plus or minus 0
+ return s << 31
+ } else { // Denormalized number -- renormalize it
+ for (m & 0x00000400) == 0 {
+ m <<= 1
+ e -= 1
+ }
+ e += 1
+ const zz uint32 = 0x0400
+ m &= ^zz
+ }
+ } else if e == 31 {
+ if m == 0 { // Inf
+ return (s << 31) | 0x7f800000
+ } else { // NaN
+ return (s << 31) | 0x7f800000 | (m << 13)
+ }
+ }
+ e = e + (127 - 15)
+ m = m << 13
+ return (s << 31) | (e << 23) | m
+}
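+
+// A few spot checks (a sketch), using well-known half-precision bit patterns:
+//
+//	halfFloatToFloatBits(0x3C00) // 0x3F800000, i.e. float32(1.0)
+//	halfFloatToFloatBits(0xC000) // 0xC0000000, i.e. float32(-2.0)
+//	halfFloatToFloatBits(0x7C00) // 0x7F800000, i.e. +Inf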
+
+// growCap returns a new capacity for a slice, given the following:
+// - oldCap: current capacity
+// - unit: in-memory size of an element
+// - num: number of elements to add
+func growCap(oldCap, unit, num int) (newCap int) {
+ // appendslice logic (if cap < 1024, *2, else *1.25):
+ // leads to many copy calls, especially when copying bytes.
+ // bytes.Buffer model (2*cap + n): much better for bytes.
+ // smarter way is to take the byte-size of the appended element(type) into account
+
+ // maintain 3 thresholds:
+ // t1: if cap <= t1, newcap = 2x
+ // t2: if cap <= t2, newcap = 1.75x
+ // t3: if cap <= t3, newcap = 1.5x
+ // else newcap = 1.25x
+ //
+ // t1, t2, t3 >= 1024 always.
+ // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
+ //
+ // With this, appending for bytes increase by:
+ // 100% up to 4K
+ // 75% up to 8K
+ // 50% up to 16K
+ // 25% beyond that
+
+ // unit can be 0 e.g. for struct{}{}; handle that appropriately
+ var t1, t2, t3 int // thresholds
+ if unit <= 1 {
+ t1, t2, t3 = 4*1024, 8*1024, 16*1024
+ } else if unit < 16 {
+ t3 = 16 / unit * 1024
+ t1 = t3 * 1 / 4
+ t2 = t3 * 2 / 4
+ } else {
+ t1, t2, t3 = 1024, 1024, 1024
+ }
+
+ var x int // temporary variable
+
+ // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+ if oldCap <= t1 { // [0,t1]
+ x = 8
+ } else if oldCap > t3 { // (t3,infinity]
+ x = 5
+ } else if oldCap <= t2 { // (t1,t2]
+ x = 7
+ } else { // (t2,t3]
+ x = 6
+ }
+ newCap = x * oldCap / 4
+
+ if num > 0 {
+ newCap += num
+ }
+
+ // ensure newCap is a multiple of 64 (if it is > 64) or 16.
+ if newCap > 64 {
+ if x = newCap % 64; x != 0 {
+ x = newCap / 64
+ newCap = 64 * (x + 1)
+ }
+ } else {
+ if x = newCap % 16; x != 0 {
+ x = newCap / 16
+ newCap = 16 * (x + 1)
+ }
+ }
+ return
+}
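+
+// A couple of worked examples (a sketch):
+//
+//	growCap(1024, 1, 0) // 2048: byte-sized elements at or below 4K double (8*1024/4)
+//	growCap(100, 8, 10) // 256:  8*100/4 + 10 = 210, rounded up to a multiple of 64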
+
+func expandSliceValue(s reflect.Value, num int) reflect.Value {
+ if num <= 0 {
+ return s
+ }
+ l0 := s.Len()
+ l1 := l0 + num // new slice length
+ if l1 < l0 {
+ panic("ExpandSlice: slice overflow")
+ }
+ c0 := s.Cap()
+ if l1 <= c0 {
+ return s.Slice(0, l1)
+ }
+ st := s.Type()
+ c1 := growCap(c0, int(st.Elem().Size()), num)
+ s2 := reflect.MakeSlice(st, l1, c1)
+ // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1)
+ reflect.Copy(s2, s)
+ return s2
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 0000000..7c2ffc0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,20 @@
+//+build !unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ return string(v)
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) []byte {
+ return []byte(v)
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 0000000..373b2b1
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,45 @@
+//+build unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+
+type unsafeString struct {
+ Data uintptr
+ Len int
+}
+
+type unsafeBytes struct {
+ Data uintptr
+ Len int
+ Cap int
+}
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ if len(v) == 0 {
+ return ""
+ }
+ x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)}
+ return *(*string)(unsafe.Pointer(&x))
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) []byte {
+ if len(v) == 0 {
+ return zeroByteSlice
+ }
+ x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)}
+ return *(*[]byte)(unsafe.Pointer(&x))
+}
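+
+// Which of the two stringView/bytesView implementations is compiled in is
+// selected by the "unsafe" build tag above: building with "go build -tags unsafe"
+// uses this file, while a default build uses the safe variants in
+// helper_not_unsafe.go.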
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/json.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 0000000..a04dfcb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1213 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+// We implement it here.
+// - Also, strconv.ParseXXX for floats and integers
+// - only works on strings resulting in unnecessary allocation and []byte-string conversion.
+//   - it does a lot of redundant checks, because json numbers are simpler than what it supports.
+// - We parse numbers (floats and integers) directly here.
+// We only delegate parsing floats if it is a hairy float which could cause a loss of precision.
+// In that case, we delegate to strconv.ParseFloat.
+//
+// Note:
+// - encode does not beautify. There is no whitespace when encoding.
+// - rpc calls which take single integer arguments or write single numeric arguments will need care.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST NOT call one another.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+//--------------------------------
+
+var (
+ jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
+
+ jsonFloat64Pow10 = [...]float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22,
+ }
+
+ jsonUint64Pow10 = [...]uint64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ }
+
+ // jsonTabs and jsonSpaces are used as caches for indents
+ jsonTabs, jsonSpaces string
+)
+
+const (
+ // jsonUnreadAfterDecNum controls whether we unread after decoding a number.
+ //
+	// Instead of unreading, we could just update d.tok (iff it's not a whitespace char).
+	// However, doing that means that we may HOLD onto some data which belongs to another stream.
+	// Thus, it is safest to unread the data when done.
+	// Keep this behind a constant flag for now.
+ jsonUnreadAfterDecNum = true
+
+ // If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+ // - If we see first character of null, false or true,
+ // do not validate subsequent characters.
+ // - e.g. if we see a n, assume null and skip next 3 characters,
+ // and do not validate they are ull.
+ // P.S. Do not expect a significant decoding boost from this.
+ jsonValidateSymbols = true
+
+	// if jsonTruncateMantissa, truncate the mantissa if it has trailing 0's.
+ // This is important because it could allow some floats to be decoded without
+ // deferring to strconv.ParseFloat.
+ jsonTruncateMantissa = true
+
+ // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow
+ jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+
+ // if mantissa >= jsonNumUintMaxVal, this is an overflow
+ jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+ // jsonNumDigitsUint64Largest = 19
+
+ jsonSpacesOrTabsLen = 128
+)
+
+func init() {
+ var bs [jsonSpacesOrTabsLen]byte
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ bs[i] = ' '
+ }
+ jsonSpaces = string(bs[:])
+
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ bs[i] = '\t'
+ }
+ jsonTabs = string(bs[:])
+}
+
+type jsonEncDriver struct {
+ e *Encoder
+ w encWriter
+ h *JsonHandle
+ b [64]byte // scratch
+ bs []byte // scratch
+ se setExtWrapper
+ ds string // indent string
+ dl uint16 // indent level
+ dt bool // indent using tabs
+ d bool // indent
+ c containerState
+ noBuiltInTypes
+}
+
+// indent is done as below:
+// - newline and indent are added before each mapKey or arrayElem
+// - newline and indent are added before each ending,
+//     except when there was no entry (so we can have {} or [])
+
+func (e *jsonEncDriver) sendContainerState(c containerState) {
+ // determine whether to output separators
+ if c == containerMapKey {
+ if e.c != containerMapStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ } else if c == containerMapValue {
+ if e.d {
+ e.w.writen2(':', ' ')
+ } else {
+ e.w.writen1(':')
+ }
+ } else if c == containerMapEnd {
+ if e.d {
+ e.dl--
+ if e.c != containerMapStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1('}')
+ } else if c == containerArrayElem {
+ if e.c != containerArrayStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ } else if c == containerArrayEnd {
+ if e.d {
+ e.dl--
+ if e.c != containerArrayStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1(']')
+ }
+ e.c = c
+}
+
+func (e *jsonEncDriver) writeIndent() {
+ e.w.writen1('\n')
+ if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen {
+ if e.dt {
+ e.w.writestr(jsonTabs[:x])
+ } else {
+ e.w.writestr(jsonSpaces[:x])
+ }
+ } else {
+ for i := uint16(0); i < e.dl; i++ {
+ e.w.writestr(e.ds)
+ }
+ }
+}
+
+func (e *jsonEncDriver) EncodeNil() {
+ e.w.writeb(jsonLiterals[9:13]) // null
+}
+
+func (e *jsonEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writeb(jsonLiterals[0:4]) // true
+ } else {
+ e.w.writeb(jsonLiterals[4:9]) // false
+ }
+}
+
+func (e *jsonEncDriver) EncodeFloat32(f float32) {
+ e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
+}
+
+func (e *jsonEncDriver) EncodeFloat64(f float64) {
+ // e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
+ e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
+}
+
+func (e *jsonEncDriver) EncodeInt(v int64) {
+ if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) {
+ e.w.writen1('"')
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+ e.w.writen1('"')
+ return
+ }
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeUint(v uint64) {
+ if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 {
+ e.w.writen1('"')
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+ e.w.writen1('"')
+ return
+ }
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ if v := ext.ConvertExt(rv); v == nil {
+ e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ // only encodes re.Value (never re.Data)
+ if re.Value == nil {
+ e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *jsonEncDriver) EncodeArrayStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('[')
+ e.c = containerArrayStart
+}
+
+func (e *jsonEncDriver) EncodeMapStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('{')
+ e.c = containerMapStart
+}
+
+func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
+ // e.w.writestr(strconv.Quote(v))
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeSymbol(v string) {
+ // e.EncodeString(c_UTF8, v)
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ // if encoding raw bytes and RawBytesExt is configured, use it to encode
+ if c == c_RAW && e.se.i != nil {
+ e.EncodeExt(v, 0, &e.se, e.e)
+ return
+ }
+ if c == c_RAW {
+ slen := base64.StdEncoding.EncodedLen(len(v))
+ if cap(e.bs) >= slen {
+ e.bs = e.bs[:slen]
+ } else {
+ e.bs = make([]byte, slen)
+ }
+ base64.StdEncoding.Encode(e.bs, v)
+ e.w.writen1('"')
+ e.w.writeb(e.bs)
+ e.w.writen1('"')
+ } else {
+ // e.EncodeString(c, string(v))
+ e.quoteStr(stringView(v))
+ }
+}
+
+func (e *jsonEncDriver) EncodeAsis(v []byte) {
+ e.w.writeb(v)
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+ // adapted from std pkg encoding/json
+ const hex = "0123456789abcdef"
+ w := e.w
+ w.writen1('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ w.writen2('\\', b)
+ case '\n':
+ w.writen2('\\', 'n')
+ case '\r':
+ w.writen2('\\', 'r')
+ case '\b':
+ w.writen2('\\', 'b')
+ case '\f':
+ w.writen2('\\', 'f')
+ case '\t':
+ w.writen2('\\', 't')
+ default:
+ // encode all bytes < 0x20 (except \r, \n).
+ // also encode < > & to prevent security holes when served to some browsers.
+ w.writestr(`\u00`)
+ w.writen2(hex[b>>4], hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+ // Both technically valid JSON, but bomb on JSONP, so fix here.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\u202`)
+ w.writen1(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ w.writestr(s[start:])
+ }
+ w.writen1('"')
+}
+
+//--------------------------------
+
+type jsonNum struct {
+ // bytes []byte // may have [+-.eE0-9]
+ mantissa uint64 // where mantissa ends, and maybe dot begins.
+ exponent int16 // exponent value.
+ manOverflow bool
+ neg bool // started with -. No initial sign in the bytes above.
+ dot bool // has dot
+ explicitExponent bool // explicit exponent
+}
+
+func (x *jsonNum) reset() {
+ x.manOverflow = false
+ x.neg = false
+ x.dot = false
+ x.explicitExponent = false
+ x.mantissa = 0
+ x.exponent = 0
+}
+
+// uintExp is called only if exponent > 0.
+func (x *jsonNum) uintExp() (n uint64, overflow bool) {
+ n = x.mantissa
+ e := x.exponent
+ if e >= int16(len(jsonUint64Pow10)) {
+ overflow = true
+ return
+ }
+ n *= jsonUint64Pow10[e]
+ if n < x.mantissa || n > jsonNumUintMaxVal {
+ overflow = true
+ return
+ }
+ return
+ // for i := int16(0); i < e; i++ {
+ // if n >= jsonNumUintCutoff {
+ // overflow = true
+ // return
+ // }
+ // n *= 10
+ // }
+ // return
+}
+
+// these constants are only used within floatVal.
+// They are brought out, so that floatVal can be inlined.
+const (
+ jsonUint64MantissaBits = 52
+ jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1
+)
+
+func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
+ // We do not want to lose precision.
+ // Consequently, we will delegate to strconv.ParseFloat if any of the following happen:
+ // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits)
+ // We expect up to 99.... (19 digits)
+ // - The mantissa cannot fit into a 52 bits of uint64
+ // - The exponent is beyond our scope ie beyong 22.
+ parseUsingStrConv = x.manOverflow ||
+ x.exponent > jsonMaxExponent ||
+ (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) ||
+ x.mantissa>>jsonUint64MantissaBits != 0
+
+ if parseUsingStrConv {
+ return
+ }
+
+ // all good. so handle parse here.
+ f = float64(x.mantissa)
+ // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f)
+ if x.neg {
+ f = -f
+ }
+ if x.exponent > 0 {
+ f *= jsonFloat64Pow10[x.exponent]
+ } else if x.exponent < 0 {
+ f /= jsonFloat64Pow10[-x.exponent]
+ }
+ return
+}
+
+type jsonDecDriver struct {
+ noBuiltInTypes
+ d *Decoder
+ h *JsonHandle
+ r decReader
+
+ c containerState
+ // tok is used to store the token read right after skipWhiteSpace.
+ tok uint8
+
+ bstr [8]byte // scratch used for string \UXXX parsing
+ b [64]byte // scratch, used for parsing strings or numbers
+ b2 [64]byte // scratch, used only for decodeBytes (after base64)
+ bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
+
+ se setExtWrapper
+
+ n jsonNum
+}
+
+func jsonIsWS(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+}
+
+// // This will skip whitespace characters and return the next byte to read.
+// // The next byte determines what the value will be one of.
+// func (d *jsonDecDriver) skipWhitespace() {
+// // fast-path: do not enter loop. Just check first (in case no whitespace).
+// b := d.r.readn1()
+// if jsonIsWS(b) {
+// r := d.r
+// for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+// }
+// }
+// d.tok = b
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+ if d.tok != 0 {
+ d.r.unreadn1()
+ d.tok = 0
+ }
+}
+
+func (d *jsonDecDriver) sendContainerState(c containerState) {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ var xc uint8 // char expected
+ if c == containerMapKey {
+ if d.c != containerMapStart {
+ xc = ','
+ }
+ } else if c == containerMapValue {
+ xc = ':'
+ } else if c == containerMapEnd {
+ xc = '}'
+ } else if c == containerArrayElem {
+ if d.c != containerArrayStart {
+ xc = ','
+ }
+ } else if c == containerArrayEnd {
+ xc = ']'
+ }
+ if xc != 0 {
+ if d.tok != xc {
+ d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ }
+ d.c = c
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == '}' || d.tok == ']' {
+ // d.tok = 0 // only checking, not consuming
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
+ bs := d.r.readx(int(toIdx - fromIdx))
+ d.tok = 0
+ if jsonValidateSymbols {
+ if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
+ d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
+ return
+ }
+ }
+}
+
+func (d *jsonDecDriver) TryDecodeAsNil() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == 'n' {
+ d.readStrIdx(10, 13) // ull
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) DecodeBool() bool {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == 'f' {
+ d.readStrIdx(5, 9) // alse
+ return false
+ }
+ if d.tok == 't' {
+ d.readStrIdx(1, 4) // rue
+ return true
+ }
+ d.d.errorf("json: decode bool: got first char %c", d.tok)
+ return false // "unreachable"
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '{' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapStart
+ return -1
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '[' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok)
+ }
+ d.tok = 0
+ d.c = containerArrayStart
+ return -1
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+ // check container type by checking the first char
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if b := d.tok; b == '{' {
+ return valueTypeMap
+ } else if b == '[' {
+ return valueTypeArray
+ } else if b == 'n' {
+ return valueTypeNil
+ } else if b == '"' {
+ return valueTypeString
+ }
+ return valueTypeUnset
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // return false // "unreachable"
+}
+
+func (d *jsonDecDriver) decNum(storeBytes bool) {
+	// If it has a . or an e|E, decode as a float; else decode as an int.
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ b := d.tok
+ var str bool
+ if b == '"' {
+ str = true
+ b = d.r.readn1()
+ }
+ if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
+ d.d.errorf("json: decNum: got first char '%c'", b)
+ return
+ }
+ d.tok = 0
+
+ const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+ const jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+ n := &d.n
+ r := d.r
+ n.reset()
+ d.bs = d.bs[:0]
+
+ if str && storeBytes {
+ d.bs = append(d.bs, '"')
+ }
+
+ // The format of a number is as below:
+ // parsing: sign? digit* dot? digit* e? sign? digit*
+ // states: 0 1* 2 3* 4 5* 6 7
+ // We honor this state so we can break correctly.
+ var state uint8 = 0
+ var eNeg bool
+ var e int16
+ var eof bool
+LOOP:
+ for !eof {
+ // fmt.Printf("LOOP: b: %q\n", b)
+ switch b {
+ case '+':
+ switch state {
+ case 0:
+ state = 2
+ // do not add sign to the slice ...
+ b, eof = r.readn1eof()
+ continue
+ case 6: // typ = jsonNumFloat
+ state = 7
+ default:
+ break LOOP
+ }
+ case '-':
+ switch state {
+ case 0:
+ state = 2
+ n.neg = true
+ // do not add sign to the slice ...
+ b, eof = r.readn1eof()
+ continue
+ case 6: // typ = jsonNumFloat
+ eNeg = true
+ state = 7
+ default:
+ break LOOP
+ }
+ case '.':
+ switch state {
+ case 0, 2: // typ = jsonNumFloat
+ state = 4
+ n.dot = true
+ default:
+ break LOOP
+ }
+ case 'e', 'E':
+ switch state {
+ case 0, 2, 4: // typ = jsonNumFloat
+ state = 6
+ // n.mantissaEndIndex = int16(len(n.bytes))
+ n.explicitExponent = true
+ default:
+ break LOOP
+ }
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ switch state {
+ case 0:
+ state = 2
+ fallthrough
+ case 2:
+ fallthrough
+ case 4:
+ if n.dot {
+ n.exponent--
+ }
+ if n.mantissa >= jsonNumUintCutoff {
+ n.manOverflow = true
+ break
+ }
+ v := uint64(b - '0')
+ n.mantissa *= 10
+ if v != 0 {
+ n1 := n.mantissa + v
+ if n1 < n.mantissa || n1 > jsonNumUintMaxVal {
+ n.manOverflow = true // n+v overflows
+ break
+ }
+ n.mantissa = n1
+ }
+ case 6:
+ state = 7
+ fallthrough
+ case 7:
+ if !(b == '0' && e == 0) {
+ e = e*10 + int16(b-'0')
+ }
+ default:
+ break LOOP
+ }
+ case '"':
+ if str {
+ if storeBytes {
+ d.bs = append(d.bs, '"')
+ }
+ b, eof = r.readn1eof()
+ }
+ break LOOP
+ default:
+ break LOOP
+ }
+ if storeBytes {
+ d.bs = append(d.bs, b)
+ }
+ b, eof = r.readn1eof()
+ }
+
+ if jsonTruncateMantissa && n.mantissa != 0 {
+ for n.mantissa%10 == 0 {
+ n.mantissa /= 10
+ n.exponent++
+ }
+ }
+
+ if e != 0 {
+ if eNeg {
+ n.exponent -= e
+ } else {
+ n.exponent += e
+ }
+ }
+
+ // d.n = n
+
+ if !eof {
+ if jsonUnreadAfterDecNum {
+ r.unreadn1()
+ } else {
+ if !jsonIsWS(b) {
+ d.tok = b
+ }
+ }
+ }
+ // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n",
+ // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
+ return
+}
+
+func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ d.decNum(false)
+ n := &d.n
+ if n.manOverflow {
+ d.d.errorf("json: overflow integer after: %v", n.mantissa)
+ return
+ }
+ var u uint64
+ if n.exponent == 0 {
+ u = n.mantissa
+ } else if n.exponent < 0 {
+ d.d.errorf("json: fractional integer")
+ return
+ } else if n.exponent > 0 {
+ var overflow bool
+ if u, overflow = n.uintExp(); overflow {
+ d.d.errorf("json: overflow integer")
+ return
+ }
+ }
+ i = int64(u)
+ if n.neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+ return
+ }
+ // fmt.Printf("DecodeInt: %v\n", i)
+ return
+}
+
+// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number
+func (d *jsonDecDriver) floatVal() (f float64) {
+ f, useStrConv := d.n.floatVal()
+ if useStrConv {
+ var err error
+ if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil {
+ panic(fmt.Errorf("parse float: %s, %v", d.bs, err))
+ }
+ if d.n.neg {
+ f = -f
+ }
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
+ d.decNum(false)
+ n := &d.n
+ if n.neg {
+ d.d.errorf("json: unsigned integer cannot be negative")
+ return
+ }
+ if n.manOverflow {
+ d.d.errorf("json: overflow integer after: %v", n.mantissa)
+ return
+ }
+ if n.exponent == 0 {
+ u = n.mantissa
+ } else if n.exponent < 0 {
+ d.d.errorf("json: fractional integer")
+ return
+ } else if n.exponent > 0 {
+ var overflow bool
+ if u, overflow = n.uintExp(); overflow {
+ d.d.errorf("json: overflow integer")
+ return
+ }
+ }
+ if chkOvf.Uint(u, bitsize) {
+ d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+ return
+ }
+ // fmt.Printf("DecodeUint: %v\n", u)
+ return
+}
+
+func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ d.decNum(true)
+ f = d.floatVal()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("json: overflow float32: %v, %s", f, d.bs)
+ return
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = xtag
+ d.d.decode(&re.Value)
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
+ if !isstring && d.se.i != nil {
+ bsOut = bs
+ d.DecodeExt(&bsOut, 0, &d.se)
+ return
+ }
+ d.appendStringAsBytes()
+ // if isstring, then just return the bytes, even if it is using the scratch buffer.
+ // the bytes will be converted to a string as needed.
+ if isstring {
+ return d.bs
+ }
+ bs0 := d.bs
+ slen := base64.StdEncoding.DecodedLen(len(bs0))
+ if slen <= cap(bs) {
+ bsOut = bs[:slen]
+ } else if zerocopy && slen <= cap(d.b2) {
+ bsOut = d.b2[:slen]
+ } else {
+ bsOut = make([]byte, slen)
+ }
+ slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
+ if err != nil {
+ d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err)
+ return nil
+ }
+ if slen != slen2 {
+ bsOut = bsOut[:slen2]
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeString() (s string) {
+ d.appendStringAsBytes()
+ // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+ if d.c == containerMapKey {
+ return d.d.string(d.bs)
+ }
+ return string(d.bs)
+}
+
+func (d *jsonDecDriver) appendStringAsBytes() {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok != '"' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
+ }
+ d.tok = 0
+
+ v := d.bs[:0]
+ var c uint8
+ r := d.r
+ for {
+ c = r.readn1()
+ if c == '"' {
+ break
+ } else if c == '\\' {
+ c = r.readn1()
+ switch c {
+ case '"', '\\', '/', '\'':
+ v = append(v, c)
+ case 'b':
+ v = append(v, '\b')
+ case 'f':
+ v = append(v, '\f')
+ case 'n':
+ v = append(v, '\n')
+ case 'r':
+ v = append(v, '\r')
+ case 't':
+ v = append(v, '\t')
+ case 'u':
+ rr := d.jsonU4(false)
+ // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
+ if utf16.IsSurrogate(rr) {
+ rr = utf16.DecodeRune(rr, d.jsonU4(true))
+ }
+ w2 := utf8.EncodeRune(d.bstr[:], rr)
+ v = append(v, d.bstr[:w2]...)
+ default:
+ d.d.errorf("json: unsupported escaped value: %c", c)
+ }
+ } else {
+ v = append(v, c)
+ }
+ }
+ d.bs = v
+}
+
+func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
+ r := d.r
+ if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') {
+ d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
+ return 0
+ }
+ // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
+ var u uint32
+ for i := 0; i < 4; i++ {
+ v := r.readn1()
+ if '0' <= v && v <= '9' {
+ v = v - '0'
+		} else if 'a' <= v && v <= 'f' {
+			v = v - 'a' + 10
+		} else if 'A' <= v && v <= 'F' {
+			v = v - 'A' + 10
+ } else {
+ d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v)
+ return 0
+ }
+ u = u*16 + uint32(v)
+ }
+ return rune(u)
+}
+
+func (d *jsonDecDriver) DecodeNaked() {
+ z := &d.d.n
+ // var decodeFurther bool
+
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ switch d.tok {
+ case 'n':
+ d.readStrIdx(10, 13) // ull
+ z.v = valueTypeNil
+ case 'f':
+ d.readStrIdx(5, 9) // alse
+ z.v = valueTypeBool
+ z.b = false
+ case 't':
+ d.readStrIdx(1, 4) // rue
+ z.v = valueTypeBool
+ z.b = true
+ case '{':
+ z.v = valueTypeMap
+ // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart
+ // decodeFurther = true
+ case '[':
+ z.v = valueTypeArray
+ // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart
+ // decodeFurther = true
+ case '"':
+ z.v = valueTypeString
+ z.s = d.DecodeString()
+ default: // number
+ d.decNum(true)
+ n := &d.n
+		// if the string had any of [.eE], then decode as a float.
+ switch {
+ case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
+ z.v = valueTypeFloat
+ z.f = d.floatVal()
+ case n.exponent == 0:
+ u := n.mantissa
+ switch {
+ case n.neg:
+ z.v = valueTypeInt
+ z.i = -int64(u)
+ case d.h.SignedInteger:
+ z.v = valueTypeInt
+ z.i = int64(u)
+ default:
+ z.v = valueTypeUint
+ z.u = u
+ }
+ default:
+ u, overflow := n.uintExp()
+ switch {
+ case overflow:
+ z.v = valueTypeFloat
+ z.f = d.floatVal()
+ case n.neg:
+ z.v = valueTypeInt
+ z.i = -int64(u)
+ case d.h.SignedInteger:
+ z.v = valueTypeInt
+ z.i = int64(u)
+ default:
+ z.v = valueTypeUint
+ z.u = u
+ }
+ }
+ // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
+ }
+ // if decodeFurther {
+ // d.s.sc.retryRead()
+ // }
+ return
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+//   - configurable way to encode/decode []byte.
+//     By default, encodes and decodes []byte using base64 Std Encoding.
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the encoding/json package in the standard library,
+// by leveraging the performance improvements of the codec library and
+// minimizing allocations.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+type JsonHandle struct {
+ textEncodingType
+ BasicHandle
+ // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
+ // If not configured, raw bytes are encoded to/from base64 text.
+ RawBytesExt InterfaceExt
+
+ // Indent indicates how a value is encoded.
+ // - If positive, indent by that number of spaces.
+ // - If negative, indent by that number of tabs.
+ Indent int8
+
+ // IntegerAsString controls how integers (signed and unsigned) are encoded.
+ //
+	// JSON numbers are commonly processed as 64-bit floating point values (e.g. by JavaScript).
+ // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+ // This can be mitigated by configuring how to encode integers.
+ //
+	// IntegerAsString interprets the following values:
+ // - if 'L', then encode integers > 2^53 as a json string.
+ // - if 'A', then encode all integers as a json string
+ // containing the exact integer representation as a decimal.
+ // - else encode all integers as a json number (default)
+ IntegerAsString uint8
+}
+
+func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
+
+func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
+ hd := jsonEncDriver{e: e, h: h}
+ hd.bs = hd.b[:0]
+
+ hd.reset()
+
+ return &hd
+}
+
+func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
+ // d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
+ hd := jsonDecDriver{d: d, h: h}
+ hd.bs = hd.b[:0]
+ hd.reset()
+ return &hd
+}
+
+func (e *jsonEncDriver) reset() {
+ e.w = e.e.w
+ e.se.i = e.h.RawBytesExt
+ if e.bs != nil {
+ e.bs = e.bs[:0]
+ }
+ e.d, e.dt, e.dl, e.ds = false, false, 0, ""
+ e.c = 0
+ if e.h.Indent > 0 {
+ e.d = true
+ e.ds = jsonSpaces[:e.h.Indent]
+ } else if e.h.Indent < 0 {
+ e.d = true
+ e.dt = true
+ e.ds = jsonTabs[:-(e.h.Indent)]
+ }
+}
+
+func (d *jsonDecDriver) reset() {
+ d.r = d.d.r
+ d.se.i = d.h.RawBytesExt
+ if d.bs != nil {
+ d.bs = d.bs[:0]
+ }
+ d.c, d.tok = 0, 0
+ d.n.reset()
+}
+
+var jsonEncodeTerminate = []byte{' '}
+
+func (h *JsonHandle) rpcEncodeTerminate() []byte {
+ return jsonEncodeTerminate
+}
+
+var _ decDriver = (*jsonDecDriver)(nil)
+var _ encDriver = (*jsonEncDriver)(nil)
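
For orientation, here is a minimal usage sketch for the JsonHandle added above (illustrative only, not part of the vendored files; it assumes the import path github.com/ugorji/go/codec used elsewhere in this vendor tree). Indent and IntegerAsString are the fields documented in the struct above, and NewEncoderBytes/NewDecoderBytes are the package's usual entry points:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.Indent = 2            // positive: indent with 2 spaces (negative would mean tabs)
	h.IntegerAsString = 'L' // integers > 2^53 are encoded as JSON strings

	in := map[string]interface{}{"name": "kube2msb", "big": uint64(1) << 60}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}
	fmt.Println(string(buf))

	var out map[string]interface{}
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
}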
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go
new file mode 100644
index 0000000..f9f8723
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -0,0 +1,845 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+MSGPACK
+
+The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
+We need to maintain compatibility with it and with how it encodes integer values
+without caring about the type.
+
+For compatibility with behaviour of msgpack-c reference implementation:
+ - Go intX (>0) and uintX
+ IS ENCODED AS
+ msgpack +ve fixnum, unsigned
+ - Go intX (<0)
+ IS ENCODED AS
+ msgpack -ve fixnum, signed
+
+*/
+package codec
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "net/rpc"
+ "reflect"
+)
+
+const (
+ mpPosFixNumMin byte = 0x00
+ mpPosFixNumMax = 0x7f
+ mpFixMapMin = 0x80
+ mpFixMapMax = 0x8f
+ mpFixArrayMin = 0x90
+ mpFixArrayMax = 0x9f
+ mpFixStrMin = 0xa0
+ mpFixStrMax = 0xbf
+ mpNil = 0xc0
+ _ = 0xc1
+ mpFalse = 0xc2
+ mpTrue = 0xc3
+ mpFloat = 0xca
+ mpDouble = 0xcb
+ mpUint8 = 0xcc
+ mpUint16 = 0xcd
+ mpUint32 = 0xce
+ mpUint64 = 0xcf
+ mpInt8 = 0xd0
+ mpInt16 = 0xd1
+ mpInt32 = 0xd2
+ mpInt64 = 0xd3
+
+ // extensions below
+ mpBin8 = 0xc4
+ mpBin16 = 0xc5
+ mpBin32 = 0xc6
+ mpExt8 = 0xc7
+ mpExt16 = 0xc8
+ mpExt32 = 0xc9
+ mpFixExt1 = 0xd4
+ mpFixExt2 = 0xd5
+ mpFixExt4 = 0xd6
+ mpFixExt8 = 0xd7
+ mpFixExt16 = 0xd8
+
+ mpStr8 = 0xd9 // new
+ mpStr16 = 0xda
+ mpStr32 = 0xdb
+
+ mpArray16 = 0xdc
+ mpArray32 = 0xdd
+
+ mpMap16 = 0xde
+ mpMap32 = 0xdf
+
+ mpNegFixNumMin = 0xe0
+ mpNegFixNumMax = 0xff
+)
+
+// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
+// that the backend RPC service takes multiple arguments, which have been arranged
+// in sequence in the slice.
+//
+// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
+// array of 1 element).
+type MsgpackSpecRpcMultiArgs []interface{}
+
+// msgpackContainerType specifies the different types of msgpack containers.
+type msgpackContainerType struct {
+ fixCutoff int
+ bFixMin, b8, b16, b32 byte
+ hasFixMin, has8, has8Always bool
+}
+
+var (
+ msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
+ msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
+ msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
+ msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
+)
+
+//---------------------------------------------
+
+type msgpackEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ w encWriter
+ h *MsgpackHandle
+ x [8]byte
+}
+
+func (e *msgpackEncDriver) EncodeNil() {
+ e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncDriver) EncodeInt(i int64) {
+ if i >= 0 {
+ e.EncodeUint(uint64(i))
+ } else if i >= -32 {
+ e.w.writen1(byte(i))
+ } else if i >= math.MinInt8 {
+ e.w.writen2(mpInt8, byte(i))
+ } else if i >= math.MinInt16 {
+ e.w.writen1(mpInt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i >= math.MinInt32 {
+ e.w.writen1(mpInt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpInt64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeUint(i uint64) {
+ if i <= math.MaxInt8 {
+ e.w.writen1(byte(i))
+ } else if i <= math.MaxUint8 {
+ e.w.writen2(mpUint8, byte(i))
+ } else if i <= math.MaxUint16 {
+ e.w.writen1(mpUint16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i <= math.MaxUint32 {
+ e.w.writen1(mpUint32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpUint64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(mpTrue)
+ } else {
+ e.w.writen1(mpFalse)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(mpFloat)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(mpDouble)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(v)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ if e.h.WriteExt {
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+ } else {
+ e.EncodeStringBytes(c_RAW, bs)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+ if l == 1 {
+ e.w.writen2(mpFixExt1, xtag)
+ } else if l == 2 {
+ e.w.writen2(mpFixExt2, xtag)
+ } else if l == 4 {
+ e.w.writen2(mpFixExt4, xtag)
+ } else if l == 8 {
+ e.w.writen2(mpFixExt8, xtag)
+ } else if l == 16 {
+ e.w.writen2(mpFixExt16, xtag)
+ } else if l < 256 {
+ e.w.writen2(mpExt8, byte(l))
+ e.w.writen1(xtag)
+ } else if l < 65536 {
+ e.w.writen1(mpExt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ e.w.writen1(xtag)
+ } else {
+ e.w.writen1(mpExt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ e.w.writen1(xtag)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeArrayStart(length int) {
+ e.writeContainerLen(msgpackContainerList, length)
+}
+
+func (e *msgpackEncDriver) EncodeMapStart(length int) {
+ e.writeContainerLen(msgpackContainerMap, length)
+}
+
+func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(s))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(s))
+ }
+ if len(s) > 0 {
+ e.w.writestr(s)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(bs))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(bs))
+ }
+ if len(bs) > 0 {
+ e.w.writeb(bs)
+ }
+}
+
+func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
+ if ct.hasFixMin && l < ct.fixCutoff {
+ e.w.writen1(ct.bFixMin | byte(l))
+ } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
+ e.w.writen2(ct.b8, uint8(l))
+ } else if l < 65536 {
+ e.w.writen1(ct.b16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ } else {
+ e.w.writen1(ct.b32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ }
+}
+
+//---------------------------------------------
+
+type msgpackDecDriver struct {
+ d *Decoder
+ r decReader // *Decoder decReader decReaderT
+ h *MsgpackHandle
+ b [scratchByteArrayLen]byte
+ bd byte
+ bdRead bool
+ br bool // bytes reader
+ noBuiltInTypes
+ noStreamingCodec
+ decNoSeparator
+}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ bd := d.bd
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch bd {
+ case mpNil:
+ n.v = valueTypeNil
+ d.bdRead = false
+ case mpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case mpTrue:
+ n.v = valueTypeBool
+ n.b = true
+
+ case mpFloat:
+ n.v = valueTypeFloat
+ n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ case mpDouble:
+ n.v = valueTypeFloat
+ n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+
+ case mpUint8:
+ n.v = valueTypeUint
+ n.u = uint64(d.r.readn1())
+ case mpUint16:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint64(d.r.readx(8)))
+
+ case mpInt8:
+ n.v = valueTypeInt
+ n.i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ n.v = valueTypeInt
+ n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ n.v = valueTypeInt
+ n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ n.v = valueTypeInt
+ n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
+
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ // positive fixnum (always signed)
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ // negative fixnum
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ if d.h.RawToString {
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ } else {
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ }
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ n.v = valueTypeExt
+ clen := d.readExtLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(clen)
+ default:
+ d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+ }
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ i = int64(uint64(d.r.readn1()))
+ case mpUint16:
+ i = int64(uint64(bigen.Uint16(d.r.readx(2))))
+ case mpUint32:
+ i = int64(uint64(bigen.Uint32(d.r.readx(4))))
+ case mpUint64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ case mpInt8:
+ i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ i = int64(int8(d.bd))
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ i = int64(int8(d.bd))
+ default:
+ d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ }
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+ if bitsize > 0 {
+ if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+ d.d.errorf("Overflow int value: %v", i)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// uint can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ ui = uint64(d.r.readn1())
+ case mpUint16:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ ui = bigen.Uint64(d.r.readx(8))
+ case mpInt8:
+ if i := int64(int8(d.r.readn1())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt16:
+ if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt32:
+ if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt64:
+ if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ ui = uint64(d.bd)
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
+ return
+ default:
+ d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ }
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+ if bitsize > 0 {
+ if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+ d.d.errorf("Overflow uint value: %v", ui)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// float can either be decoded from msgpack type: float, double or intX
+func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFloat {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == mpDouble {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ f = float64(d.DecodeInt(0))
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+ d.d.errorf("msgpack: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFalse || d.bd == 0 {
+ // b = false
+ } else if d.bd == mpTrue || d.bd == 1 {
+ b = true
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ var clen int
+ // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin
+ if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
+ clen = d.readContainerLen(msgpackContainerBin)
+ } else {
+ clen = d.readContainerLen(msgpackContainerStr)
+ }
+ // println("DecodeBytes: clen: ", clen)
+ d.bdRead = false
+ // bytes may be nil, so handle it. if nil, clen=-1.
+ if clen < 0 {
+ return nil
+ }
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *msgpackDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *msgpackDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *msgpackDecDriver) ContainerType() (vt valueType) {
+ bd := d.bd
+ if bd == mpNil {
+ return valueTypeNil
+ } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+ (!d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
+ return valueTypeBytes
+ } else if d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
+ return valueTypeString
+ } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
+ return valueTypeArray
+ } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpNil {
+ d.bdRead = false
+ v = true
+ }
+ return
+}
+
+func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
+ bd := d.bd
+ if bd == mpNil {
+ clen = -1 // to represent nil
+ } else if bd == ct.b8 {
+ clen = int(d.r.readn1())
+ } else if bd == ct.b16 {
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ } else if bd == ct.b32 {
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ } else if (ct.bFixMin & bd) == ct.bFixMin {
+ clen = int(ct.bFixMin ^ bd)
+ } else {
+ d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) ReadMapStart() int {
+ return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecDriver) ReadArrayStart() int {
+ return d.readContainerLen(msgpackContainerList)
+}
+
+func (d *msgpackDecDriver) readExtLen() (clen int) {
+ switch d.bd {
+ case mpNil:
+ clen = -1 // to represent nil
+ case mpFixExt1:
+ clen = 1
+ case mpFixExt2:
+ clen = 2
+ case mpFixExt4:
+ clen = 4
+ case mpFixExt8:
+ clen = 8
+ case mpFixExt16:
+ clen = 16
+ case mpExt8:
+ clen = int(d.r.readn1())
+ case mpExt16:
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ case mpExt32:
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ default:
+ d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
+ return
+ }
+ return
+}
+
+func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ xbd := d.bd
+ if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
+ xbs = d.DecodeBytes(nil, false, true)
+ } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
+ (xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
+ xbs = d.DecodeBytes(nil, true, true)
+ } else {
+ clen := d.readExtLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(clen)
+ }
+ d.bdRead = false
+ return
+}
+
+//--------------------------------------------------
+
+// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+ BasicHandle
+
+ // RawToString controls how raw bytes are decoded into a nil interface{}.
+ RawToString bool
+
+ // WriteExt flag supports encoding configured extensions with extension tags.
+ // It also controls whether other elements of the new spec are encoded (ie Str8).
+ //
+ // With WriteExt=false, configured extensions are serialized as raw bytes
+ // and Str8 is not encoded.
+ //
+	// A stream can still be decoded into a typed value if an appropriate value
+	// is supplied, but the type cannot be inferred from the stream. If no appropriate
+ // type is provided (e.g. decoding into a nil interface{}), you get back
+ // a []byte or string based on the setting of RawToString.
+ WriteExt bool
+ binaryEncodingType
+}
+
+func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
+ return &msgpackEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
+ return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *msgpackEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *msgpackDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+//--------------------------------------------------
+
+type msgpackSpecRpcCodec struct {
+ rpcCodec
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // WriteRequest can write to both a Go service, and other services that do
+ // not abide by the 1 argument rule of a Go service.
+	// We discriminate based on whether the body is a MsgpackSpecRpcMultiArgs.
+ var bodyArr []interface{}
+ if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
+ bodyArr = ([]interface{})(m)
+ } else {
+ bodyArr = []interface{}{body}
+ }
+ r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ var moe interface{}
+ if r.Error != "" {
+ moe = r.Error
+ }
+ if moe != nil && body != nil {
+ body = nil
+ }
+ r2 := []interface{}{1, uint32(r.Seq), moe, body}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
+ if body == nil { // read and discard
+ return c.read(nil)
+ }
+ bodyArr := []interface{}{body}
+ return c.read(&bodyArr)
+}
+
+func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+
+ if c.isClosed() {
+ return io.EOF
+ }
+
+ // We read the response header by hand
+ // so that the body can be decoded on its own from the stream at a later time.
+
+	const fia byte = 0x94 // four-item array descriptor value
+ // Not sure why the panic of EOF is swallowed above.
+ // if bs1 := c.dec.r.readn1(); bs1 != fia {
+ // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
+ // return
+ // }
+ var b byte
+ b, err = c.br.ReadByte()
+ if err != nil {
+ return
+ }
+ if b != fia {
+ err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
+ return
+ }
+
+ if err = c.read(&b); err != nil {
+ return
+ }
+ if b != expectTypeByte {
+ err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
+ return
+ }
+ if err = c.read(msgid); err != nil {
+ return
+ }
+ if err = c.read(methodOrError); err != nil {
+ return
+ }
+ return
+}
+
+//--------------------------------------------------
+
+// msgpackSpecRpc is the implementation of Rpc that uses the custom communication protocol
+// defined in the msgpack-rpc spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+type msgpackSpecRpc struct{}
+
+// MsgpackSpecRpc implements Rpc using the communication protocol defined in
+// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
+var MsgpackSpecRpc msgpackSpecRpc
+
+func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ decDriver = (*msgpackDecDriver)(nil)
+var _ encDriver = (*msgpackEncDriver)(nil)
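
Similarly, a minimal round-trip sketch for the MsgpackHandle added above (illustrative only, not part of the vendored files; it assumes the same github.com/ugorji/go/codec import path):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type service struct {
	Name string
	Port int
}

func main() {
	var h codec.MsgpackHandle
	h.WriteExt = true    // use the newer spec elements (Bin/Str8) and real extension tags
	h.RawToString = true // decode msgpack raw/str into Go strings rather than []byte

	in := service{Name: "kube2msb", Port: 8080}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	var out service
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}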
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go
new file mode 100644
index 0000000..cfee3d0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/noop.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math/rand"
+ "time"
+)
+
+// NoopHandle returns a no-op handle. It basically does nothing.
+// It is only useful for benchmarking, as it gives an idea of the
+// overhead from the codec framework.
+//
+// LIBRARY USERS: *** DO NOT USE ***
+func NoopHandle(slen int) *noopHandle {
+ h := noopHandle{}
+ h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
+ h.B = make([][]byte, slen)
+ h.S = make([]string, slen)
+ for i := 0; i < len(h.S); i++ {
+ b := make([]byte, i+1)
+ for j := 0; j < len(b); j++ {
+ b[j] = 'a' + byte(i)
+ }
+ h.B[i] = b
+ h.S[i] = string(b)
+ }
+ return &h
+}
+
+// noopHandle does nothing.
+// It is used to simulate the overhead of the codec framework.
+type noopHandle struct {
+ BasicHandle
+ binaryEncodingType
+ noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
+}
+
+type noopDrv struct {
+ d *Decoder
+ e *Encoder
+ i int
+ S []string
+ B [][]byte
+ mks []bool // stack. if map (true), else if array (false)
+ mk bool // top of stack. what container are we on? map or array?
+ ct valueType // last response for IsContainerType.
+ cb int // counter for ContainerType
+ rand *rand.Rand
+}
+
+func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
+func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
+
+func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
+func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }
+
+func (h *noopDrv) reset() {}
+func (h *noopDrv) uncacheRead() {}
+
+// --- encDriver
+
+// stack functions (for map and array)
+func (h *noopDrv) start(b bool) {
+ // println("start", len(h.mks)+1)
+ h.mks = append(h.mks, b)
+ h.mk = b
+}
+func (h *noopDrv) end() {
+ // println("end: ", len(h.mks)-1)
+ h.mks = h.mks[:len(h.mks)-1]
+ if len(h.mks) > 0 {
+ h.mk = h.mks[len(h.mks)-1]
+ } else {
+ h.mk = false
+ }
+}
+
+func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (h *noopDrv) EncodeNil() {}
+func (h *noopDrv) EncodeInt(i int64) {}
+func (h *noopDrv) EncodeUint(i uint64) {}
+func (h *noopDrv) EncodeBool(b bool) {}
+func (h *noopDrv) EncodeFloat32(f float32) {}
+func (h *noopDrv) EncodeFloat64(f float64) {}
+func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
+func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
+func (h *noopDrv) EncodeMapStart(length int) { h.start(false) }
+func (h *noopDrv) EncodeEnd() { h.end() }
+
+func (h *noopDrv) EncodeString(c charEncoding, v string) {}
+func (h *noopDrv) EncodeSymbol(v string) {}
+func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
+
+func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
+
+// ---- decDriver
+func (h *noopDrv) initReadNext() {}
+func (h *noopDrv) CheckBreak() bool { return false }
+func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false }
+func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) }
+func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) }
+func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
+func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 }
+func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] }
+
+// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }
+
+func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
+
+func (h *noopDrv) ReadEnd() { h.end() }
+
+// toggle map/slice
+func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) }
+func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
+
+func (h *noopDrv) ContainerType() (vt valueType) {
+ // return h.m(2) == 0
+	// handle kStruct, which will bomb if it calls this and doesn't get back a map or array.
+ // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
+ // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
+ // however, every 10th time it is called, we just return something else.
+ var vals = [...]valueType{valueTypeArray, valueTypeMap}
+ // ------------ TAKE ------------
+ // if h.cb%2 == 0 {
+ // if h.ct == valueTypeMap || h.ct == valueTypeArray {
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // } else if h.cb%5 == 0 {
+ // h.ct = valueType(h.m(8))
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // ------------ TAKE ------------
+ // if h.cb%16 == 0 {
+ // h.ct = valueType(h.cb % 8)
+ // } else {
+ // h.ct = vals[h.cb%2]
+ // }
+ h.ct = vals[h.cb%2]
+ h.cb++
+ return h.ct
+
+ // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
+ // return h.ct
+ // }
+ // return valueTypeUnset
+ // TODO: may need to tweak this so it works.
+ // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
+ // h.cb = !h.cb
+ // h.ct = vt
+ // return h.cb
+ // }
+ // // go in a loop and check it.
+ // h.ct = vt
+ // h.cb = h.m(7) == 0
+ // return h.cb
+}
+func (h *noopDrv) TryDecodeAsNil() bool {
+ if h.mk {
+ return false
+ } else {
+ return h.m(8) == 0
+ }
+}
+func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
+ return 0
+}
+
+func (h *noopDrv) DecodeNaked() {
+ // use h.r (random) not h.m() because h.m() could cause the same value to be given.
+ var sk int
+ if h.mk {
+ // if mapkey, do not support values of nil OR bytes, array, map or rawext
+ sk = h.r(7) + 1
+ } else {
+ sk = h.r(12)
+ }
+ n := &h.d.n
+ switch sk {
+ case 0:
+ n.v = valueTypeNil
+ case 1:
+ n.v, n.b = valueTypeBool, false
+ case 2:
+ n.v, n.b = valueTypeBool, true
+ case 3:
+ n.v, n.i = valueTypeInt, h.DecodeInt(64)
+ case 4:
+ n.v, n.u = valueTypeUint, h.DecodeUint(64)
+ case 5:
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
+ case 6:
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
+ case 7:
+ n.v, n.s = valueTypeString, h.DecodeString()
+ case 8:
+ n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
+ case 9:
+ n.v = valueTypeArray
+ case 10:
+ n.v = valueTypeMap
+ default:
+ n.v = valueTypeExt
+ n.u = h.DecodeUint(64)
+ n.l = h.B[h.m(len(h.B))]
+ }
+ h.ct = n.v
+ return
+}
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go
new file mode 100644
index 0000000..2353263
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.go
@@ -0,0 +1,3 @@
+package codec
+
+//go:generate bash prebuild.sh
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh
new file mode 100644
index 0000000..909f4bb
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/prebuild.sh
@@ -0,0 +1,199 @@
+#!/bin/bash
+
+# _needgen is a helper function to tell if we need to generate files for msgp, codecgen.
+_needgen() {
+ local a="$1"
+ zneedgen=0
+ if [[ ! -e "$a" ]]
+ then
+ zneedgen=1
+ echo 1
+ return 0
+ fi
+ for i in `ls -1 *.go.tmpl gen.go values_test.go`
+ do
+ if [[ "$a" -ot "$i" ]]
+ then
+ zneedgen=1
+ echo 1
+ return 0
+ fi
+ done
+ echo 0
+}
+
+# _build generates fast-path.go and gen-helper.go.
+#
+# It is needed because there is some dependency between the generated code
+# and the other files in the package. Consequently, we have to remove the
+# generated files and put stubs in place, before calling "go run" again
+# to recreate them.
+_build() {
+ if ! [[ "${zforce}" == "1" ||
+ "1" == $( _needgen "fast-path.generated.go" ) ||
+ "1" == $( _needgen "gen-helper.generated.go" ) ||
+ "1" == $( _needgen "gen.generated.go" ) ||
+ 1 == 0 ]]
+ then
+ return 0
+ fi
+
+ # echo "Running prebuild"
+ if [ "${zbak}" == "1" ]
+ then
+ # echo "Backing up old generated files"
+ _zts=`date '+%m%d%Y_%H%M%S'`
+ _gg=".generated.go"
+ [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
+ [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
+ # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak
+ # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak
+ else
+ rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \
+ *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
+ fi
+
+ cat > gen.generated.go <<EOF
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = \`
+EOF
+
+ cat >> gen.generated.go < gen-dec-map.go.tmpl
+
+ cat >> gen.generated.go <<EOF
+\`
+
+const genDecListTmpl = \`
+EOF
+
+ cat >> gen.generated.go < gen-dec-array.go.tmpl
+
+ cat >> gen.generated.go <<EOF
+\`
+
+EOF
+
+ cat > gen-from-tmpl.codec.generated.go <<EOF
+package codec
+import "io"
+func GenInternalGoFile(r io.Reader, w io.Writer, safe bool) error {
+return genInternalGoFile(r, w, safe)
+}
+EOF
+
+ cat > gen-from-tmpl.generated.go <<EOF
+//+build ignore
+
+package main
+
+//import "flag"
+import "ugorji.net/codec"
+import "os"
+
+func run(fnameIn, fnameOut string, safe bool) {
+fin, err := os.Open(fnameIn)
+if err != nil { panic(err) }
+defer fin.Close()
+fout, err := os.Create(fnameOut)
+if err != nil { panic(err) }
+defer fout.Close()
+err = codec.GenInternalGoFile(fin, fout, safe)
+if err != nil { panic(err) }
+}
+
+func main() {
+// do not make safe/unsafe variants.
+// Instead, depend on escape analysis, and place string creation and usage appropriately.
+// run("unsafe.go.tmpl", "safe.generated.go", true)
+// run("unsafe.go.tmpl", "unsafe.generated.go", false)
+run("fast-path.go.tmpl", "fast-path.generated.go", false)
+run("gen-helper.go.tmpl", "gen-helper.generated.go", false)
+}
+
+EOF
+ go run -tags=notfastpath gen-from-tmpl.generated.go && \
+ rm -f gen-from-tmpl.*generated.go
+}
+
+_codegenerators() {
+ if [[ $zforce == "1" ||
+ "1" == $( _needgen "values_codecgen${zsfx}" ) ||
+ "1" == $( _needgen "values_msgp${zsfx}" ) ||
+ "1" == $( _needgen "values_ffjson${zsfx}" ) ||
+ 1 == 0 ]]
+ then
+ # codecgen creates some temporary files in the directory (main, pkg).
+    # Consequently, we should start msgp and ffjson first, and also add a small delay
+    # before starting codecgen.
+ # Without this, ffjson chokes on one of the temporary files from codecgen.
+ if [[ $zexternal == "1" ]]
+ then
+ echo "ffjson ... " && \
+ ffjson -w values_ffjson${zsfx} $zfin &
+ zzzIdFF=$!
+ echo "msgp ... " && \
+ msgp -tests=false -o=values_msgp${zsfx} -file=$zfin &
+ zzzIdMsgp=$!
+
+ sleep 1 # give ffjson and msgp some buffer time. see note above.
+ fi
+
+ echo "codecgen - !unsafe ... " && \
+ codecgen -rt codecgen -t 'x,codecgen,!unsafe' -o values_codecgen${zsfx} -d 19780 $zfin &
+ zzzIdC=$!
+ echo "codecgen - unsafe ... " && \
+ codecgen -u -rt codecgen -t 'x,codecgen,unsafe' -o values_codecgen_unsafe${zsfx} -d 19781 $zfin &
+ zzzIdCU=$!
+ wait $zzzIdC $zzzIdCU $zzzIdMsgp $zzzIdFF && \
+ # remove (M|Unm)arshalJSON implementations, so they don't conflict with encoding/json bench \
+ if [[ $zexternal == "1" ]]
+ then
+ sed -i 's+ MarshalJSON(+ _MarshalJSON(+g' values_ffjson${zsfx} && \
+ sed -i 's+ UnmarshalJSON(+ _UnmarshalJSON(+g' values_ffjson${zsfx}
+ fi && \
+ echo "generators done!" && \
+ true
+ fi
+}
+
+# _init reads the arguments and sets up the flags
+_init() {
+OPTIND=1
+while getopts "fbx" flag
+do
+ case "x$flag" in
+ 'xf') zforce=1;;
+ 'xb') zbak=1;;
+ 'xx') zexternal=1;;
+ *) echo "prebuild.sh accepts [-fbx] only"; return 1;;
+ esac
+done
+shift $((OPTIND-1))
+OPTIND=1
+}
+
+# main script.
+# First ensure that this is being run from the basedir (i.e. dirname of script is .)
+if [ "." = `dirname $0` ]
+then
+ zmydir=`pwd`
+ zfin="test_values.generated.go"
+ zsfx="_generated_test.go"
+ # rm -f *_generated_test.go
+ rm -f codecgen-*.go && \
+ _init "$@" && \
+ _build && \
+ cp $zmydir/values_test.go $zmydir/$zfin && \
+ _codegenerators && \
+ echo prebuild done successfully
+ rm -f $zmydir/$zfin
+else
+ echo "Script must be run from the directory it resides in"
+fi
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go
new file mode 100644
index 0000000..dad53d0
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/rpc.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "io"
+ "net/rpc"
+ "sync"
+)
+
+// rpcEncodeTerminator allows a handler to specify a []byte terminator to send after each Encode.
+//
+// Some codecs like json need to put a space after each encoded value, to serve as a
+// delimiter for things like numbers (else json codec will continue reading till EOF).
+type rpcEncodeTerminator interface {
+ rpcEncodeTerminate() []byte
+}
+
+// Rpc provides an rpc Server or Client Codec for rpc communication.
+type Rpc interface {
+ ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+ ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
+// used by the rpc connection. It accommodates use-cases where the connection
+// should be used by rpc and non-rpc functions, e.g. streaming a file after
+// sending an rpc response.
+type RpcCodecBuffered interface {
+ BufferedReader() *bufio.Reader
+ BufferedWriter() *bufio.Writer
+}
+
+// -------------------------------------
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+ rwc io.ReadWriteCloser
+ dec *Decoder
+ enc *Encoder
+ bw *bufio.Writer
+ br *bufio.Reader
+ mu sync.Mutex
+ h Handle
+
+ cls bool
+ clsmu sync.RWMutex
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+ bw := bufio.NewWriter(conn)
+ br := bufio.NewReader(conn)
+ return rpcCodec{
+ rwc: conn,
+ bw: bw,
+ br: br,
+ enc: NewEncoder(bw, h),
+ dec: NewDecoder(br, h),
+ h: h,
+ }
+}
+
+func (c *rpcCodec) BufferedReader() *bufio.Reader {
+ return c.br
+}
+
+func (c *rpcCodec) BufferedWriter() *bufio.Writer {
+ return c.bw
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+ if err = c.enc.Encode(obj1); err != nil {
+ return
+ }
+ t, tOk := c.h.(rpcEncodeTerminator)
+ if tOk {
+ c.bw.Write(t.rpcEncodeTerminate())
+ }
+ if writeObj2 {
+ if err = c.enc.Encode(obj2); err != nil {
+ return
+ }
+ if tOk {
+ c.bw.Write(t.rpcEncodeTerminate())
+ }
+ }
+ if doFlush {
+ return c.bw.Flush()
+ }
+ return
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+ //If nil is passed in, we should still attempt to read content to nowhere.
+ if obj == nil {
+ var obj2 interface{}
+ return c.dec.Decode(&obj2)
+ }
+ return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) isClosed() bool {
+ c.clsmu.RLock()
+ x := c.cls
+ c.clsmu.RUnlock()
+ return x
+}
+
+func (c *rpcCodec) Close() error {
+ if c.isClosed() {
+ return io.EOF
+ }
+ c.clsmu.Lock()
+ c.cls = true
+ c.clsmu.Unlock()
+ return c.rwc.Close()
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+ rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // Must protect for concurrent access as per API
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
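+//
+// Illustrative usage (a sketch, not part of this file's API docs; assumes an
+// established net.Conn conn, a configured Handle h, and a hypothetical
+// receiver myService):
+//
+//	srv := rpc.NewServer()
+//	srv.Register(myService)
+//	go srv.ServeCodec(GoRpc.ServerCodec(conn, h))
+//
+//	client := rpc.NewClientWithCodec(GoRpc.ClientCodec(conn, h))
+//	defer client.Close()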
+var GoRpc goRpc
+
+func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go
new file mode 100644
index 0000000..7c0ba7a
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/simple.go
@@ -0,0 +1,519 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+)
+
+const (
+ _ uint8 = iota
+ simpleVdNil = 1
+ simpleVdFalse = 2
+ simpleVdTrue = 3
+ simpleVdFloat32 = 4
+ simpleVdFloat64 = 5
+
+ // each lasts for 4 (ie n, n+1, n+2, n+3)
+ simpleVdPosInt = 8
+ simpleVdNegInt = 12
+
+	// containers: each reserves a block of 8 codes (n .. n+7), of which n .. n+4 are used
+ simpleVdString = 216
+ simpleVdByteArray = 224
+ simpleVdArray = 232
+ simpleVdMap = 240
+ simpleVdExt = 248
+)
+
+type simpleEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
+ e *Encoder
+ h *SimpleHandle
+ w encWriter
+ b [8]byte
+}
+
+func (e *simpleEncDriver) EncodeNil() {
+ e.w.writen1(simpleVdNil)
+}
+
+func (e *simpleEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(simpleVdTrue)
+ } else {
+ e.w.writen1(simpleVdFalse)
+ }
+}
+
+func (e *simpleEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(simpleVdFloat32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *simpleEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(simpleVdFloat64)
+ bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *simpleEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-v), simpleVdNegInt)
+ } else {
+ e.encUint(uint64(v), simpleVdPosInt)
+ }
+}
+
+func (e *simpleEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, simpleVdPosInt)
+}
+
+func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 1)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:8], e.w}.writeUint64(v)
+ }
+}
+
+func (e *simpleEncDriver) encLen(bd byte, length int) {
+ if length == 0 {
+ e.w.writen1(bd)
+ } else if length <= math.MaxUint8 {
+ e.w.writen1(bd + 1)
+ e.w.writen1(uint8(length))
+ } else if length <= math.MaxUint16 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
+ } else if int64(length) <= math.MaxUint32 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
+ } else {
+ e.w.writen1(bd + 4)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
+ }
+}
+
+func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(simpleVdExt, length)
+ e.w.writen1(xtag)
+}
+
+func (e *simpleEncDriver) EncodeArrayStart(length int) {
+ e.encLen(simpleVdArray, length)
+}
+
+func (e *simpleEncDriver) EncodeMapStart(length int) {
+ e.encLen(simpleVdMap, length)
+}
+
+func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
+ e.encLen(simpleVdString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *simpleEncDriver) EncodeSymbol(v string) {
+ e.EncodeString(c_UTF8, v)
+}
+
+func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ e.encLen(simpleVdByteArray, len(v))
+ e.w.writeb(v)
+}
+
+//------------------------------------
+
+type simpleDecDriver struct {
+ d *Decoder
+ h *SimpleHandle
+ r decReader
+ bdRead bool
+ bd byte
+ br bool // bytes reader
+ noBuiltInTypes
+ noStreamingCodec
+ decNoSeparator
+ b [scratchByteArrayLen]byte
+}
+
+func (d *simpleDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *simpleDecDriver) ContainerType() (vt valueType) {
+ if d.bd == simpleVdNil {
+ return valueTypeNil
+ } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
+ d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
+ return valueTypeBytes
+ } else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
+ d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
+ return valueTypeString
+ } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
+ d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
+ return valueTypeArray
+ } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
+ d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ }
+ return valueTypeUnset
+}
+
+func (d *simpleDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdPosInt:
+ ui = uint64(d.r.readn1())
+ case simpleVdPosInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case simpleVdPosInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case simpleVdPosInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ case simpleVdNegInt:
+ ui = uint64(d.r.readn1())
+ neg = true
+ case simpleVdNegInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ neg = true
+ case simpleVdNegInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ neg = true
+ case simpleVdNegInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ neg = true
+ default:
+ d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
+ return
+ }
+ // don't do this check, because callers may only want the unsigned value.
+ // if ui > math.MaxInt64 {
+ // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
+ // return
+ // }
+ return
+}
+
+func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ ui, neg := d.decCheckInteger()
+ i, overflow := chkOvf.SignedInt(ui)
+ if overflow {
+ d.d.errorf("simple: overflow converting %v to signed integer", ui)
+ return
+ }
+ if neg {
+ i = -i
+ }
+ if chkOvf.Int(i, bitsize) {
+ d.d.errorf("simple: overflow integer: %v", i)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("Assigning negative signed value to unsigned type")
+ return
+ }
+ if chkOvf.Uint(ui, bitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == simpleVdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+ f = float64(d.DecodeInt(64))
+ } else {
+ d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ if chkOverflow32 && chkOvf.Float32(f) {
+		d.d.errorf("simple: float32 overflow: %v", f)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdTrue {
+ b = true
+ } else if d.bd == simpleVdFalse {
+ } else {
+ d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) ReadMapStart() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayStart() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) decLen() int {
+ switch d.bd % 8 {
+ case 0:
+ return 0
+ case 1:
+ return int(d.r.readn1())
+ case 2:
+ return int(bigen.Uint16(d.r.readx(2)))
+ case 3:
+ ui := uint64(bigen.Uint32(d.r.readx(4)))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ case 4:
+ ui := bigen.Uint64(d.r.readx(8))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("simple: overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ }
+ d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
+ return -1
+}
+
+func (d *simpleDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.b[:], true, true))
+}
+
+func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, bs)
+}
+
+func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ xbs = d.DecodeBytes(nil, false, true)
+ default:
+ d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := &d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case simpleVdNil:
+ n.v = valueTypeNil
+ case simpleVdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case simpleVdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
+ }
+ case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
+ case simpleVdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
+ case simpleVdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
+ case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Length of containers (strings, bytes, array, map, extensions)
+// are encoded in 0, 1, 2, 4 or 8 bytes.
+// Zero-length containers have no length encoded.
+//   For others, the number of bytes is 2^((bd%8)-1), i.e. 1, 2, 4 or 8.
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
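+//
+// Illustrative example (derived from the rules above, not from a published spec):
+// the unsigned integer 1000 encodes as the descriptor simpleVdPosInt+1 (0x09)
+// followed by the big-endian bytes 0x03 0xE8, and the string "abc" encodes as
+// simpleVdString+1 (0xD9), a length byte 0x03, then the bytes 'a' 'b' 'c'.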
+type SimpleHandle struct {
+ BasicHandle
+ binaryEncodingType
+}
+
+func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
+ return &simpleEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
+ return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
+}
+
+func (e *simpleEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *simpleDecDriver) reset() {
+ d.r = d.d.r
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
new file mode 100644
index 0000000..9028586
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
@@ -0,0 +1,639 @@
+[
+ {
+ "cbor": "AA==",
+ "hex": "00",
+ "roundtrip": true,
+ "decoded": 0
+ },
+ {
+ "cbor": "AQ==",
+ "hex": "01",
+ "roundtrip": true,
+ "decoded": 1
+ },
+ {
+ "cbor": "Cg==",
+ "hex": "0a",
+ "roundtrip": true,
+ "decoded": 10
+ },
+ {
+ "cbor": "Fw==",
+ "hex": "17",
+ "roundtrip": true,
+ "decoded": 23
+ },
+ {
+ "cbor": "GBg=",
+ "hex": "1818",
+ "roundtrip": true,
+ "decoded": 24
+ },
+ {
+ "cbor": "GBk=",
+ "hex": "1819",
+ "roundtrip": true,
+ "decoded": 25
+ },
+ {
+ "cbor": "GGQ=",
+ "hex": "1864",
+ "roundtrip": true,
+ "decoded": 100
+ },
+ {
+ "cbor": "GQPo",
+ "hex": "1903e8",
+ "roundtrip": true,
+ "decoded": 1000
+ },
+ {
+ "cbor": "GgAPQkA=",
+ "hex": "1a000f4240",
+ "roundtrip": true,
+ "decoded": 1000000
+ },
+ {
+ "cbor": "GwAAAOjUpRAA",
+ "hex": "1b000000e8d4a51000",
+ "roundtrip": true,
+ "decoded": 1000000000000
+ },
+ {
+ "cbor": "G///////////",
+ "hex": "1bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": 18446744073709551615
+ },
+ {
+ "cbor": "wkkBAAAAAAAAAAA=",
+ "hex": "c249010000000000000000",
+ "roundtrip": true,
+ "decoded": 18446744073709551616
+ },
+ {
+ "cbor": "O///////////",
+ "hex": "3bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": -18446744073709551616,
+ "skip": true
+ },
+ {
+ "cbor": "w0kBAAAAAAAAAAA=",
+ "hex": "c349010000000000000000",
+ "roundtrip": true,
+ "decoded": -18446744073709551617
+ },
+ {
+ "cbor": "IA==",
+ "hex": "20",
+ "roundtrip": true,
+ "decoded": -1
+ },
+ {
+ "cbor": "KQ==",
+ "hex": "29",
+ "roundtrip": true,
+ "decoded": -10
+ },
+ {
+ "cbor": "OGM=",
+ "hex": "3863",
+ "roundtrip": true,
+ "decoded": -100
+ },
+ {
+ "cbor": "OQPn",
+ "hex": "3903e7",
+ "roundtrip": true,
+ "decoded": -1000
+ },
+ {
+ "cbor": "+QAA",
+ "hex": "f90000",
+ "roundtrip": true,
+ "decoded": 0.0
+ },
+ {
+ "cbor": "+YAA",
+ "hex": "f98000",
+ "roundtrip": true,
+ "decoded": -0.0
+ },
+ {
+ "cbor": "+TwA",
+ "hex": "f93c00",
+ "roundtrip": true,
+ "decoded": 1.0
+ },
+ {
+ "cbor": "+z/xmZmZmZma",
+ "hex": "fb3ff199999999999a",
+ "roundtrip": true,
+ "decoded": 1.1
+ },
+ {
+ "cbor": "+T4A",
+ "hex": "f93e00",
+ "roundtrip": true,
+ "decoded": 1.5
+ },
+ {
+ "cbor": "+Xv/",
+ "hex": "f97bff",
+ "roundtrip": true,
+ "decoded": 65504.0
+ },
+ {
+ "cbor": "+kfDUAA=",
+ "hex": "fa47c35000",
+ "roundtrip": true,
+ "decoded": 100000.0
+ },
+ {
+ "cbor": "+n9///8=",
+ "hex": "fa7f7fffff",
+ "roundtrip": true,
+ "decoded": 3.4028234663852886e+38
+ },
+ {
+ "cbor": "+3435DyIAHWc",
+ "hex": "fb7e37e43c8800759c",
+ "roundtrip": true,
+ "decoded": 1.0e+300
+ },
+ {
+ "cbor": "+QAB",
+ "hex": "f90001",
+ "roundtrip": true,
+ "decoded": 5.960464477539063e-08
+ },
+ {
+ "cbor": "+QQA",
+ "hex": "f90400",
+ "roundtrip": true,
+ "decoded": 6.103515625e-05
+ },
+ {
+ "cbor": "+cQA",
+ "hex": "f9c400",
+ "roundtrip": true,
+ "decoded": -4.0
+ },
+ {
+ "cbor": "+8AQZmZmZmZm",
+ "hex": "fbc010666666666666",
+ "roundtrip": true,
+ "decoded": -4.1
+ },
+ {
+ "cbor": "+XwA",
+ "hex": "f97c00",
+ "roundtrip": true,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+X4A",
+ "hex": "f97e00",
+ "roundtrip": true,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+fwA",
+ "hex": "f9fc00",
+ "roundtrip": true,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+n+AAAA=",
+ "hex": "fa7f800000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+n/AAAA=",
+ "hex": "fa7fc00000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+v+AAAA=",
+ "hex": "faff800000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+3/wAAAAAAAA",
+ "hex": "fb7ff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+3/4AAAAAAAA",
+ "hex": "fb7ff8000000000000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+//wAAAAAAAA",
+ "hex": "fbfff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "9A==",
+ "hex": "f4",
+ "roundtrip": true,
+ "decoded": false
+ },
+ {
+ "cbor": "9Q==",
+ "hex": "f5",
+ "roundtrip": true,
+ "decoded": true
+ },
+ {
+ "cbor": "9g==",
+ "hex": "f6",
+ "roundtrip": true,
+ "decoded": null
+ },
+ {
+ "cbor": "9w==",
+ "hex": "f7",
+ "roundtrip": true,
+ "diagnostic": "undefined"
+ },
+ {
+ "cbor": "8A==",
+ "hex": "f0",
+ "roundtrip": true,
+ "diagnostic": "simple(16)"
+ },
+ {
+ "cbor": "+Bg=",
+ "hex": "f818",
+ "roundtrip": true,
+ "diagnostic": "simple(24)"
+ },
+ {
+ "cbor": "+P8=",
+ "hex": "f8ff",
+ "roundtrip": true,
+ "diagnostic": "simple(255)"
+ },
+ {
+ "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
+ "hex": "c074323031332d30332d32315432303a30343a30305a",
+ "roundtrip": true,
+ "diagnostic": "0(\"2013-03-21T20:04:00Z\")"
+ },
+ {
+ "cbor": "wRpRS2ew",
+ "hex": "c11a514b67b0",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240)"
+ },
+ {
+ "cbor": "wftB1FLZ7CAAAA==",
+ "hex": "c1fb41d452d9ec200000",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240.5)"
+ },
+ {
+ "cbor": "10QBAgME",
+ "hex": "d74401020304",
+ "roundtrip": true,
+ "diagnostic": "23(h'01020304')"
+ },
+ {
+ "cbor": "2BhFZElFVEY=",
+ "hex": "d818456449455446",
+ "roundtrip": true,
+ "diagnostic": "24(h'6449455446')"
+ },
+ {
+ "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
+ "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
+ "roundtrip": true,
+ "diagnostic": "32(\"http://www.example.com\")"
+ },
+ {
+ "cbor": "QA==",
+ "hex": "40",
+ "roundtrip": true,
+ "diagnostic": "h''"
+ },
+ {
+ "cbor": "RAECAwQ=",
+ "hex": "4401020304",
+ "roundtrip": true,
+ "diagnostic": "h'01020304'"
+ },
+ {
+ "cbor": "YA==",
+ "hex": "60",
+ "roundtrip": true,
+ "decoded": ""
+ },
+ {
+ "cbor": "YWE=",
+ "hex": "6161",
+ "roundtrip": true,
+ "decoded": "a"
+ },
+ {
+ "cbor": "ZElFVEY=",
+ "hex": "6449455446",
+ "roundtrip": true,
+ "decoded": "IETF"
+ },
+ {
+ "cbor": "YiJc",
+ "hex": "62225c",
+ "roundtrip": true,
+ "decoded": "\"\\"
+ },
+ {
+ "cbor": "YsO8",
+ "hex": "62c3bc",
+ "roundtrip": true,
+ "decoded": "ü"
+ },
+ {
+ "cbor": "Y+awtA==",
+ "hex": "63e6b0b4",
+ "roundtrip": true,
+ "decoded": "水"
+ },
+ {
+ "cbor": "ZPCQhZE=",
+ "hex": "64f0908591",
+ "roundtrip": true,
+ "decoded": "𐅑"
+ },
+ {
+ "cbor": "gA==",
+ "hex": "80",
+ "roundtrip": true,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "gwECAw==",
+ "hex": "83010203",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3
+ ]
+ },
+ {
+ "cbor": "gwGCAgOCBAU=",
+ "hex": "8301820203820405",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
+ "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "oA==",
+ "hex": "a0",
+ "roundtrip": true,
+ "decoded": {
+ }
+ },
+ {
+ "cbor": "ogECAwQ=",
+ "hex": "a201020304",
+ "roundtrip": true,
+ "skip": true,
+ "diagnostic": "{1: 2, 3: 4}"
+ },
+ {
+ "cbor": "omFhAWFiggID",
+ "hex": "a26161016162820203",
+ "roundtrip": true,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhoWFiYWM=",
+ "hex": "826161a161626163",
+ "roundtrip": true,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
+ "hex": "a56161614161626142616361436164614461656145",
+ "roundtrip": true,
+ "decoded": {
+ "a": "A",
+ "b": "B",
+ "c": "C",
+ "d": "D",
+ "e": "E"
+ }
+ },
+ {
+ "cbor": "X0IBAkMDBAX/",
+ "hex": "5f42010243030405ff",
+ "roundtrip": false,
+ "skip": true,
+ "diagnostic": "(_ h'0102', h'030405')"
+ },
+ {
+ "cbor": "f2VzdHJlYWRtaW5n/w==",
+ "hex": "7f657374726561646d696e67ff",
+ "roundtrip": false,
+ "decoded": "streaming"
+ },
+ {
+ "cbor": "n/8=",
+ "hex": "9fff",
+ "roundtrip": false,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "nwGCAgOfBAX//w==",
+ "hex": "9f018202039f0405ffff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwGCAgOCBAX/",
+ "hex": "9f01820203820405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGCAgOfBAX/",
+ "hex": "83018202039f0405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGfAgP/ggQF",
+ "hex": "83019f0203ff820405",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
+ "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "v2FhAWFinwID//8=",
+ "hex": "bf61610161629f0203ffff",
+ "roundtrip": false,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhv2FiYWP/",
+ "hex": "826161bf61626163ff",
+ "roundtrip": false,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "v2NGdW71Y0FtdCH/",
+ "hex": "bf6346756ef563416d7421ff",
+ "roundtrip": false,
+ "decoded": {
+ "Fun": true,
+ "Amt": -2
+ }
+ }
+]
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/test.py b/src/kube2msb/vendor/github.com/ugorji/go/codec/test.py
new file mode 100644
index 0000000..c0ad20b
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/test.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+# This will create golden files in a directory passed to it.
+# A Test calls this internally to create the golden files
+# so it can process them (so we don't have to check in the files).
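+#
+# Example invocations (the path and port values are illustrative; see doMain below):
+#   ./test.py testdata /tmp/codec-golden
+#   ./test.py rpc-server 9999 30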
+
+# Ensure msgpack-python and cbor are installed first, using:
+# sudo apt-get install python-dev
+# sudo apt-get install python-pip
+# pip install --user msgpack-python msgpack-rpc-python cbor
+
+# Ensure all "string" keys are utf strings (else encoded as bytes)
+
+import cbor, msgpack, msgpackrpc, sys, os, threading
+
+def get_test_data_list():
+ # get list with all primitive types, and a combo type
+ l0 = [
+ -8,
+ -1616,
+ -32323232,
+ -6464646464646464,
+ 192,
+ 1616,
+ 32323232,
+ 6464646464646464,
+ 192,
+ -3232.0,
+ -6464646464.0,
+ 3232.0,
+ 6464.0,
+ 6464646464.0,
+ False,
+ True,
+ u"null",
+ None,
+ u"someday",
+ 1328176922000002000,
+ u"",
+ -2206187877999998000,
+ u"bytestring",
+ 270,
+ u"none",
+ -2013855847999995777,
+ #-6795364578871345152,
+ ]
+ l1 = [
+ { "true": True,
+ "false": False },
+ { "true": u"True",
+ "false": False,
+ "uint16(1616)": 1616 },
+ { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
+ "int32":32323232, "bool": True,
+ "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
+ "SHORT STRING": u"1234567890" },
+ { True: "true", 138: False, "false": 200 }
+ ]
+
+ l = []
+ l.extend(l0)
+ l.append(l0)
+ l.append(1)
+ l.extend(l1)
+ return l
+
+def build_test_data(destdir):
+ l = get_test_data_list()
+ for i in range(len(l)):
+ # packer = msgpack.Packer()
+ serialized = msgpack.dumps(l[i])
+ f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
+ f.write(serialized)
+ f.close()
+ serialized = cbor.dumps(l[i])
+ f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
+ f.write(serialized)
+ f.close()
+
+def doRpcServer(port, stopTimeSec):
+ class EchoHandler(object):
+ def Echo123(self, msg1, msg2, msg3):
+ return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
+ def EchoStruct(self, msg):
+ return ("%s" % msg)
+
+ addr = msgpackrpc.Address('localhost', port)
+ server = msgpackrpc.Server(EchoHandler())
+ server.listen(addr)
+ # run thread to stop it after stopTimeSec seconds if > 0
+ if stopTimeSec > 0:
+ def myStopRpcServer():
+ server.stop()
+ t = threading.Timer(stopTimeSec, myStopRpcServer)
+ t.start()
+ server.start()
+
+def doRpcClientToPythonSvc(port):
+ address = msgpackrpc.Address('localhost', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print client.call("Echo123", "A1", "B2", "C3")
+ print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
+
+def doRpcClientToGoSvc(port):
+ # print ">>>> port: ", port, " <<<<<"
+ address = msgpackrpc.Address('localhost', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
+ print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
+
+def doMain(args):
+ if len(args) == 2 and args[0] == "testdata":
+ build_test_data(args[1])
+ elif len(args) == 3 and args[0] == "rpc-server":
+ doRpcServer(int(args[1]), int(args[2]))
+ elif len(args) == 2 and args[0] == "rpc-client-python-service":
+ doRpcClientToPythonSvc(int(args[1]))
+ elif len(args) == 2 and args[0] == "rpc-client-go-service":
+ doRpcClientToGoSvc(int(args[1]))
+ else:
+ print("Usage: test.py " +
+ "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
+
+if __name__ == "__main__":
+ doMain(sys.argv[1:])
+
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh b/src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh
new file mode 100644
index 0000000..00857b6
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/tests.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# Run all the different permutations of all the tests.
+# This helps ensure that nothing gets broken.
+
+_run() {
+ # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i),
+ # binc-nosymbols (n), struct2array (s), intern string (e),
+ # json-indent (d), circular (l)
+ # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f)
+ # 3. OPTIONS: verbose (v), reset (z), must (m),
+ #
+ # Use combinations of mode to get exactly what you want,
+ # and then pass the variations you need.
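+    #
+    # For example, _run "-gxu_tcinsed_ml" runs all the variations with the
+    # codecgen, unsafe and external (x) tags, as the invocations at the bottom
+    # of this script do.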
+
+ ztags=""
+ zargs=""
+ local OPTIND
+ OPTIND=1
+ while getopts "_xurtcinsvgzmefdl" flag
+ do
+ case "x$flag" in
+ 'xr') ;;
+ 'xf') ztags="$ztags notfastpath" ;;
+ 'xg') ztags="$ztags codecgen" ;;
+ 'xx') ztags="$ztags x" ;;
+ 'xu') ztags="$ztags unsafe" ;;
+ 'xv') zargs="$zargs -tv" ;;
+ 'xz') zargs="$zargs -tr" ;;
+ 'xm') zargs="$zargs -tm" ;;
+ 'xl') zargs="$zargs -tl" ;;
+ *) ;;
+ esac
+ done
+ # shift $((OPTIND-1))
+ printf '............. TAGS: %s .............\n' "$ztags"
+ # echo ">>>>>>> TAGS: $ztags"
+
+ OPTIND=1
+ while getopts "_xurtcinsvgzmefdl" flag
+ do
+ case "x$flag" in
+ 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;
+ 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;;
+ 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;;
+ 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;;
+ 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;;
+ 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;;
+ 'xd') printf ">>>>>>> INDENT : ";
+ go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs;
+ go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs;
+ sleep 2 ;;
+ *) ;;
+ esac
+ done
+ shift $((OPTIND-1))
+
+ OPTIND=1
+}
+
+# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
+if [[ "x$@" = "x" ]]; then
+ # All: r, x, g, gu
+ _run "-_tcinsed_ml" # regular
+ _run "-_tcinsed_ml_z" # regular with reset
+ _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath)
+ _run "-x_tcinsed_ml" # external
+ _run "-gx_tcinsed_ml" # codecgen: requires external
+ _run "-gxu_tcinsed_ml" # codecgen + unsafe
+elif [[ "x$@" = "x-Z" ]]; then
+ # Regular
+ _run "-_tcinsed_ml" # regular
+ _run "-_tcinsed_ml_z" # regular with reset
+elif [[ "x$@" = "x-F" ]]; then
+ # regular with notfastpath
+ _run "-_tcinsed_ml_f" # regular
+ _run "-_tcinsed_ml_zf" # regular with reset
+else
+ _run "$@"
+fi
diff --git a/src/kube2msb/vendor/github.com/ugorji/go/codec/time.go b/src/kube2msb/vendor/github.com/ugorji/go/codec/time.go
new file mode 100644
index 0000000..718b731
--- /dev/null
+++ b/src/kube2msb/vendor/github.com/ugorji/go/codec/time.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+var (
+ timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+ timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) {
+ defer panicToErr(&err)
+ bs = timeExt{}.WriteExt(rv.Interface())
+ return
+ }
+ timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) {
+ defer panicToErr(&err)
+ timeExt{}.ReadExt(rv.Interface(), bs)
+ return
+ }
+)
+
+type timeExt struct{}
+
+func (x timeExt) WriteExt(v interface{}) (bs []byte) {
+ switch v2 := v.(type) {
+ case time.Time:
+ bs = encodeTime(v2)
+ case *time.Time:
+ bs = encodeTime(*v2)
+ default:
+ panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
+ }
+ return
+}
+func (x timeExt) ReadExt(v interface{}, bs []byte) {
+ tt, err := decodeTime(bs)
+ if err != nil {
+ panic(err)
+ }
+ *(v.(*time.Time)) = tt
+}
+
+func (x timeExt) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+func (x timeExt) UpdateExt(v interface{}, src interface{}) {
+ x.ReadExt(v, src.([]byte))
+}
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+// and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+// A: Is secs component encoded? 1 = true
+// B: Is nsecs component encoded? 1 = true
+// C: Is tz component encoded? 1 = true
+// DDD: Number of extra bytes for secs (range 0-7).
+// If A = 1, secs encoded in DDD+1 bytes.
+// If A = 0, secs is not encoded, and is assumed to be 0.
+// If A = 1, then we need at least 1 byte to encode secs.
+// DDD says the number of extra bytes beyond that 1.
+// E.g. if DDD=0, then secs is represented in 1 byte.
+// if DDD=2, then secs is represented in 3 bytes.
+// EE: Number of extra bytes for nsecs (range 0-3).
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+// secs component encoded in `DDD + 1` bytes (if A == 1)
+// nsecs component encoded in `EE + 1` bytes (if B == 1)
+// tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a big-endian,
+// two's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// Least significant bit 0 are described below:
+//
+// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+// Bit 15 = have_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
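+// For example (following the description above): a UTC time with nonzero
+// seconds and zero nanoseconds sets only A=1 (B=0, C=0), so the encoding is
+// the descriptor byte followed by the seconds value in DDD+1 big-endian
+// bytes; nothing is written for the nsecs or tz components.
+//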
+func encodeTime(t time.Time) []byte {
+ //t := rv.Interface().(time.Time)
+ tsecs, tnsecs := t.Unix(), t.Nanosecond()
+ var (
+ bd byte
+ btmp [8]byte
+ bs [16]byte
+ i int = 1
+ )
+ l := t.Location()
+ if l == time.UTC {
+ l = nil
+ }
+ if tsecs != 0 {
+ bd = bd | 0x80
+ bigen.PutUint64(btmp[:], uint64(tsecs))
+ f := pruneSignExt(btmp[:], tsecs >= 0)
+ bd = bd | (byte(7-f) << 2)
+ copy(bs[i:], btmp[f:])
+ i = i + (8 - f)
+ }
+ if tnsecs != 0 {
+ bd = bd | 0x40
+ bigen.PutUint32(btmp[:4], uint32(tnsecs))
+ f := pruneSignExt(btmp[:4], true)
+ bd = bd | byte(3-f)
+ copy(bs[i:], btmp[f:4])
+ i = i + (4 - f)
+ }
+ if l != nil {
+ bd = bd | 0x20
+ // Note that Go Libs do not give access to dst flag.
+ _, zoneOffset := t.Zone()
+ //zoneName, zoneOffset := t.Zone()
+ zoneOffset /= 60
+ z := uint16(zoneOffset)
+ bigen.PutUint16(btmp[:2], z)
+ // clear dst flags
+ bs[i] = btmp[0] & 0x3f
+ bs[i+1] = btmp[1]
+ i = i + 2
+ }
+ bs[0] = bd
+ return bs[0:i]
+}
+
+// DecodeTime decodes a []byte into a time.Time.
+func decodeTime(bs []byte) (tt time.Time, err error) {
+ bd := bs[0]
+ var (
+ tsec int64
+ tnsec uint32
+ tz uint16
+ i byte = 1
+ i2 byte
+ n byte
+ )
+ if bd&(1<<7) != 0 {
+ var btmp [8]byte
+ n = ((bd >> 2) & 0x7) + 1
+ i2 = i + n
+ copy(btmp[8-n:], bs[i:i2])
+ //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+ if bs[i]&(1<<7) != 0 {
+ copy(btmp[0:8-n], bsAll0xff)
+ //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+ }
+ i = i2
+ tsec = int64(bigen.Uint64(btmp[:]))
+ }
+ if bd&(1<<6) != 0 {
+ var btmp [4]byte
+ n = (bd & 0x3) + 1
+ i2 = i + n
+ copy(btmp[4-n:], bs[i:i2])
+ i = i2
+ tnsec = bigen.Uint32(btmp[:])
+ }
+ if bd&(1<<5) == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ return
+ }
+ // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+ // However, we need name here, so it can be shown when time is printed.
+ // Zone name is in form: UTC-08:00.
+ // Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+ i2 = i + 2
+ tz = bigen.Uint16(bs[i:i2])
+ i = i2
+ // sign extend sign bit into top 2 MSB (which were dst bits):
+ if tz&(1<<13) == 0 { // positive
+ tz = tz & 0x3fff //clear 2 MSBs: dst bits
+ } else { // negative
+ tz = tz | 0xc000 //set 2 MSBs: dst bits
+ //tzname[3] = '-' (TODO: verify. this works here)
+ }
+ tzint := int16(tz)
+ if tzint == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ } else {
+ // For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+ // The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+ // var zoneName = timeLocUTCName(tzint)
+ tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+ }
+ return
+}
+
+func timeLocUTCName(tzint int16) string {
+ if tzint == 0 {
+ return "UTC"
+ }
+ var tzname = []byte("UTC+00:00")
+ //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+ //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+ var tzhr, tzmin int16
+ if tzint < 0 {
+ tzname[3] = '-' // (TODO: verify. this works here)
+ tzhr, tzmin = -tzint/60, (-tzint)%60
+ } else {
+ tzhr, tzmin = tzint/60, tzint%60
+ }
+ tzname[4] = timeDigits[tzhr/10]
+ tzname[5] = timeDigits[tzhr%10]
+ tzname[7] = timeDigits[tzmin/10]
+ tzname[8] = timeDigits[tzmin%10]
+ return string(tzname)
+ //return time.FixedZone(string(tzname), int(tzint)*60)
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/LICENSE b/src/kube2msb/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/golang.org/x/net/PATENTS b/src/kube2msb/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/src/kube2msb/vendor/golang.org/x/net/context/context.go b/src/kube2msb/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..11bd8d3
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,447 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out <-chan Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, &c)
+ return &c, func() { c.cancel(true, Canceled) }
+}
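+
+// Illustrative sketch (editor's addition, not part of the upstream file): a
+// typical caller pairs WithCancel with defer so resources are always released.
+// The worker function below is hypothetical and is assumed to select on
+// ctx.Done() and return when that channel is closed.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()      // always release the context's resources
+//	go worker(ctx)      // worker exits once ctx.Done() is closed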
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) cancelCtx {
+ return cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return &c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
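+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// callers can inspect the effective deadline before starting expensive work.
+// parent is a hypothetical existing Context; if it already carries an earlier
+// deadline, that earlier deadline is the one reported.
+//
+//	ctx, cancel := context.WithDeadline(parent, time.Now().Add(5*time.Second))
+//	defer cancel()
+//	if d, ok := ctx.Deadline(); ok {
+//		// d is no later than 5s from now, and may be earlier.
+//		_ = d
+//	}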
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go
new file mode 100644
index 0000000..e3170e3
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go
@@ -0,0 +1,19 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package ctxhttp
+
+import "net/http"
+
+func canceler(client *http.Client, req *http.Request) func() {
+ // TODO(djd): Respect any existing value of req.Cancel.
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
new file mode 100644
index 0000000..56bcbad
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package ctxhttp
+
+import "net/http"
+
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+func canceler(client *http.Client, req *http.Request) func() {
+ rc, ok := client.Transport.(requestCanceler)
+ if !ok {
+ return func() {}
+ }
+ return func() {
+ rc.CancelRequest(req)
+ }
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 0000000..26a5e19
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,140 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+ testHookContextDoneBeforeHeaders = nop
+ testHookDoReturned = nop
+ testHookDidBodyClose = nop
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
+ cancel := canceler(client, req)
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := client.Do(req)
+ testHookDoReturned()
+ result <- responseAndError{resp, err}
+ }()
+
+ var resp *http.Response
+
+ select {
+ case <-ctx.Done():
+ testHookContextDoneBeforeHeaders()
+ cancel()
+ // Clean up after the goroutine calling client.Do:
+ go func() {
+ if r := <-result; r.resp != nil {
+ testHookDidBodyClose()
+ r.resp.Body.Close()
+ }
+ }()
+ return nil, ctx.Err()
+ case r := <-result:
+ var err error
+ resp, err = r.resp, r.err
+ if err != nil {
+ return resp, err
+ }
+ }
+
+ c := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ cancel()
+ case <-c:
+ // The response's Body is closed.
+ }
+ }()
+ resp.Body = &notifyingReader{resp.Body, c}
+
+ return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
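+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// combining Get with a timeout context. fetchStatus is a hypothetical helper;
+// if the deadline expires before the server responds, Get returns ctx.Err()
+// (context.DeadlineExceeded).
+//
+//	func fetchStatus(ctx context.Context, url string) (int, error) {
+//		ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
+//		defer cancel()
+//		resp, err := ctxhttp.Get(ctx, http.DefaultClient, url)
+//		if err != nil {
+//			return 0, err
+//		}
+//		defer resp.Body.Close()
+//		return resp.StatusCode, nil
+//	}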
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+ io.ReadCloser
+ notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+ n, err := r.ReadCloser.Read(p)
+ if err != nil && r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return n, err
+}
+
+func (r *notifyingReader) Close() error {
+ err := r.ReadCloser.Close()
+ if r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return err
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/Dockerfile b/src/kube2msb/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 0000000..53fc525
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image are found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+ autoconf automake autotools-dev \
+ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+ libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+ cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/Makefile b/src/kube2msb/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 0000000..55fd826
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/README b/src/kube2msb/vendor/golang.org/x/net/http2/README
new file mode 100644
index 0000000..360d5aa
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+ but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/client_conn_pool.go b/src/kube2msb/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 0000000..772ea5e
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,225 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
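+
+// Illustrative sketch (editor's addition, not part of the upstream file): a
+// caller-supplied pool is plugged into a Transport through its exported
+// ConnPool field; myPool is a hypothetical ClientConnPool implementation.
+//
+//	t := &Transport{ConnPool: myPool}
+//	res, err := t.RoundTrip(req)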
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ call := p.getStartDialLocked(addr)
+ p.mu.Unlock()
+ <-call.done
+ return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{})}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+ c.res, c.err = c.p.t.dialClientConn(addr)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (since it doesn't
+// yet know the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+ p.mu.Lock()
+ p.addConnLocked(key, cc)
+ p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+	// If we filtered one out, zero the now-unused last slot so the GC
+	// doesn't keep the removed *ClientConn reachable.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/configure_transport.go b/src/kube2msb/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 0000000..daa17f5
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr(authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol but
+// converts panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.t.RoundTrip(req)
+ if err == ErrNoCachedConn {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/errors.go b/src/kube2msb/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000..f320b2c
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,90 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError wraps an ErrCode with an informative reason explaining why the
+// error occurred.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(ErrCodeProtocol).
+type connError struct {
+ Code ErrCode
+ Reason string
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/fixed_buffer.go b/src/kube2msb/vendor/golang.org/x/net/http2/fixed_buffer.go
new file mode 100644
index 0000000..47da0f0
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/fixed_buffer.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+)
+
+// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type fixedBuffer struct {
+ buf []byte
+ r, w int
+}
+
+var (
+ errReadEmpty = errors.New("read from empty fixedBuffer")
+ errWriteFull = errors.New("write on full fixedBuffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *fixedBuffer) Read(p []byte) (n int, err error) {
+ if b.r == b.w {
+ return 0, errReadEmpty
+ }
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ if b.r == b.w {
+ b.r = 0
+ b.w = 0
+ }
+ return n, nil
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *fixedBuffer) Len() int {
+ return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *fixedBuffer) Write(p []byte) (n int, err error) {
+ // Slide existing data to beginning.
+ if b.r > 0 && len(p) > len(b.buf)-b.w {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ // Write new data.
+ n = copy(b.buf[b.w:], p)
+ b.w += n
+ if n < len(p) {
+ err = errWriteFull
+ }
+ return n, err
+}
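+
+// Illustrative sketch (editor's addition, not part of the upstream file): the
+// buffer is sized once up front; Write reports errWriteFull when it runs out
+// of room, and Read frees room that a later Write can reuse by sliding data.
+//
+//	b := fixedBuffer{buf: make([]byte, 4)}
+//	b.Write([]byte("abcd")) // n=4, err=nil
+//	b.Write([]byte("e"))    // n=0, err=errWriteFull
+//	p := make([]byte, 2)
+//	b.Read(p)               // reads "ab"; the next Write can slide data into that room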
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/flow.go b/src/kube2msb/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000..957de25
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+ // n is the number of DATA bytes we're allowed to send.
+ // A flow is kept both on a conn and a per-stream.
+ n int32
+
+ // conn points to the shared connection-level flow that is
+ // shared by all streams on that conn. It is nil for the flow
+ // that's on the conn directly.
+ conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+ remain := (1<<31 - 1) - f.n
+ if n > remain {
+ return false
+ }
+ f.n += n
+ return true
+}
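+
+// Illustrative sketch (editor's addition, not part of the upstream file): a
+// stream-level flow window is capped by the shared connection window, and
+// take consumes from both.
+//
+//	var connFlow, streamFlow flow
+//	connFlow.add(100)
+//	streamFlow.add(1000)
+//	streamFlow.setConnFlow(&connFlow)
+//	_ = streamFlow.available() // 100: limited by the connection window
+//	streamFlow.take(40)        // leaves 60 on the connection, 960 on the stream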
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/frame.go b/src/kube2msb/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000..e1e837c
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1269 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "sync"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to mark this
+	// frame's buffers as invalid, since the subsequent
+	// frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+ errReason string
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ AllowIllegalReads bool
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads bool
+
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+		0, // 3 bytes of length, filled in by endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if logFrameWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fh, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ return f, nil
+}
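+
+// Illustrative sketch (editor's addition, not part of the upstream file): a
+// typical read loop type-asserts the concrete frame type; readLoop, conn and
+// handleData are hypothetical.
+//
+//	func readLoop(conn net.Conn) error {
+//		fr := NewFramer(conn, conn)
+//		for {
+//			f, err := fr.ReadFrame()
+//			if err != nil {
+//				if terminalReadFrameError(err) {
+//					return err
+//				}
+//				continue // e.g. a StreamError; the connection is still usable
+//			}
+//			switch f := f.(type) {
+//			case *SettingsFrame:
+//				if !f.IsAck() {
+//					fr.WriteSettingsAck()
+//				}
+//			case *DataFrame:
+//				handleData(f.StreamID, f.Data()) // invalid after the next ReadFrame
+//			}
+//		}
+//	}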
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errReason = reason
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := &DataFrame{
+ FrameHeader: fh,
+ }
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var errStreamID = errors.New("invalid streamid")
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ // TODO: ignoring padding for now. will add when somebody cares.
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ f.startWrite(FrameData, flags, streamID)
+ f.wbuf = append(f.wbuf, data...)
+ return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+ FrameHeader
+ p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+		// Expecting a whole number of 6-byte settings.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ f := &SettingsFrame{FrameHeader: fh, p: p}
+ if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, ConnectionError(ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+ if settingID == s {
+ return binary.BigEndian.Uint32(buf[2:6]), true
+ }
+ buf = buf[6:]
+ }
+ return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ if err := fn(Setting{
+ SettingID(binary.BigEndian.Uint16(buf[:2])),
+ binary.BigEndian.Uint32(buf[2:6]),
+ }); err != nil {
+ return err
+ }
+ buf = buf[6:]
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+ f.startWrite(FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+ f.startWrite(FrameSettings, FlagSettingsAck, 0)
+ return f.endWrite()
+}
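+
+// Illustrative sketch (editor's addition, not part of the upstream file): the
+// usual exchange writes a SETTINGS frame at connection start and answers the
+// peer's non-ACK SETTINGS frame with an empty ACK. fr is assumed to be an
+// existing *Framer and f a frame previously returned by fr.ReadFrame.
+//
+//	fr.WriteSettings(Setting{ID: SettingMaxFrameSize, Val: 1 << 14})
+//	// ... later, when a SETTINGS frame arrives from the peer:
+//	if sf, ok := f.(*SettingsFrame); ok && !sf.IsAck() {
+//		fr.WriteSettingsAck()
+//	}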
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+ FrameHeader
+ Data [8]byte
+}
+
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if len(payload) != 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &PingFrame{FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+ var flags Flags
+ if ack {
+ flags = FlagPingAck
+ }
+ f.startWrite(FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+ FrameHeader
+ LastStreamID uint32
+ ErrCode ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ return &GoAwayFrame{
+ FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+ f.startWrite(FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+ FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+ return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+ FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+ FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+ hf := &HeadersFrame{
+ FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(FlagHeadersPadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+ if fh.Flags.Has(FlagHeadersPriority) {
+ var v uint32
+ p, v, err = readUint32(p)
+ if err != nil {
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = readByte(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) <= 0 {
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADERS frame.
+ Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= FlagHeadersPriority
+ }
+ f.startWrite(FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !validStreamID(v) && !f.AllowIllegalWrites {
+ return errors.New("invalid dependent stream id")
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
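A hedged sketch of how HeadersFrameParam and WriteHeaders fit together: the header block fragment is produced with the hpack encoder vendored alongside this package, and the in-memory buffer again stands in for a connection.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var conn bytes.Buffer
	fr := http2.NewFramer(&conn, &conn)

	// WriteHeaders only transports an already-encoded fragment,
	// so encode the fields with HPACK first.
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
	} {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}

	err := fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("HEADERS frame is %d bytes on the wire\n", conn.Len())
}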
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+ FrameHeader
+ PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+ return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
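Because Weight is zero-indexed on the wire, a caller who wants an effective weight of 16 passes 15. An illustrative sketch (the buffer is a stand-in for a connection):

package main

import (
	"bytes"

	"golang.org/x/net/http2"
)

func main() {
	var conn bytes.Buffer
	fr := http2.NewFramer(&conn, &conn)

	// Declare that stream 5 depends exclusively on stream 3,
	// with an effective weight of 16 (wire value 15).
	err := fr.WritePriority(5, http2.PriorityParam{
		StreamDep: 3,
		Exclusive: true,
		Weight:    15,
	})
	if err != nil {
		panic(err)
	}
}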
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+ FrameHeader
+ ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID that this PUSH_PROMISE frame is associated with.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID that this
+ // PUSH_PROMISE frame promises.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
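An illustrative sketch of promising a pushed stream: the PUSH_PROMISE rides on an existing client-initiated stream (odd ID), while the promised stream gets an even, server-chosen ID; the header block is prepared with the hpack encoder as before.

package main

import (
	"bytes"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var conn bytes.Buffer
	fr := http2.NewFramer(&conn, &conn)

	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/style.css"},
	} {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}

	// Promise stream 2 on behalf of the client-initiated stream 1.
	err := fr.WritePushPromise(http2.PushPromiseParam{
		StreamID:      1,
		PromiseID:     2,
		BlockFragment: hbuf.Bytes(),
		EndHeaders:    true,
	})
	if err != nil {
		panic(err)
	}
}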
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
+
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/go15.go b/src/kube2msb/vendor/golang.org/x/net/http2/go15.go
new file mode 100644
index 0000000..f0a5624
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/go15.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package http2
+
+import "net/http"
+
+func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel }
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/gotrack.go b/src/kube2msb/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000..9933c9f
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// cutoff64 returns the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
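The goroutine-ID trick above is easy to misread, so here is a standalone sketch of the same idea using only the standard library (strconv.ParseUint rather than the package-private parseUintBytes). Like the original, it is a debugging aid, not an API.

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// goroutineID parses the leading "goroutine 4707 [" line that
// runtime.Stack produces, mirroring curGoroutineID above.
func goroutineID() uint64 {
	b := make([]byte, 64)
	b = b[:runtime.Stack(b, false)]
	b = bytes.TrimPrefix(b, []byte("goroutine "))
	b = b[:bytes.IndexByte(b, ' ')]
	n, err := strconv.ParseUint(string(b), 10, 64)
	if err != nil {
		panic(err)
	}
	return n
}

func main() {
	fmt.Println("running on goroutine", goroutineID())
}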
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/headermap.go b/src/kube2msb/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000..c2805f6
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+ commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+ for _, v := range []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ } {
+ chk := http.CanonicalHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
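The two maps built in init simply cache the round trip between Go's canonical header spelling and the lowercase form HTTP/2 requires; a tiny standard-library sketch of the same mapping:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	canon := http.CanonicalHeaderKey("content-type") // "Content-Type"
	lower := strings.ToLower(canon)                  // "content-type", as HTTP/2 requires
	fmt.Println(canon, "<->", lower)
}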
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/hpack/encode.go b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..80d621c
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+ // maxSizeLimit is the maximum table size this encoder
+ // supports. This will protect the encoder from too large
+ // size.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+ e.buf = e.buf[:0]
+
+ if e.tableSizeUpdate {
+ e.tableSizeUpdate = false
+ if e.minSize < e.dynTab.maxSize {
+ e.buf = appendTableSize(e.buf, e.minSize)
+ }
+ e.minSize = uint32Max
+ e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+ }
+
+ idx, nameValueMatch := e.searchTable(f)
+ if nameValueMatch {
+ e.buf = appendIndexed(e.buf, idx)
+ } else {
+ indexing := e.shouldIndex(f)
+ if indexing {
+ e.dynTab.add(f)
+ }
+
+ if idx == 0 {
+ e.buf = appendNewName(e.buf, f, indexing)
+ } else {
+ e.buf = appendIndexedName(e.buf, f, idx, indexing)
+ }
+ }
+ n, err := e.w.Write(e.buf)
+ if err == nil && n != len(e.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+// searchTable searches f in both the static and dynamic header tables.
+// The static header table is searched first. The dynamic header table
+// is searched only when there is no exact match for both name and
+// value in the static table. If there is no match, i is 0. If both
+// name and value match, i is the matched index and nameValueMatch
+// becomes true. If only the name matches, i points to that index and
+// nameValueMatch becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+ for idx, hf := range staticTable {
+ if !constantTimeStringCompare(hf.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(idx + 1)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(hf.Value, f.Value) {
+ continue
+ }
+ i = uint64(idx + 1)
+ nameValueMatch = true
+ return
+ }
+
+ j, nameValueMatch := e.dynTab.search(f)
+ if nameValueMatch || (i == 0 && j != 0) {
+ i = j + uint64(len(staticTable))
+ }
+ return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+ if v > e.maxSizeLimit {
+ v = e.maxSizeLimit
+ }
+ if v < e.minSize {
+ e.minSize = v
+ }
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same as the default dynamic header table size
+// described in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+ e.maxSizeLimit = v
+ if e.dynTab.maxSize > v {
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+ }
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Inremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring to the indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
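To make the prefix-integer encoding concrete, here is a standalone copy of the same logic (appendVarInt itself is unexported) applied to the worked example in RFC 7541, Appendix C.1.2: the value 1337 with a 5-bit prefix encodes to the octets 1f 9a 0a.

package main

import "fmt"

// encodePrefixInt reproduces appendVarInt above for illustration:
// encode i using an n-bit prefix (RFC 7541, Section 5.1).
func encodePrefixInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}

func main() {
	fmt.Printf("% x\n", encodePrefixInt(nil, 5, 1337)) // prints: 1f 9a 0a
}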
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces a strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+ huffmanLength := HuffmanEncodeLength(s)
+ if huffmanLength < uint64(len(s)) {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, huffmanLength)
+ dst = AppendHuffmanString(dst, s)
+ dst[first] |= 0x80
+ } else {
+ dst = appendVarInt(dst, 7, uint64(len(s)))
+ dst = append(dst, s...)
+ }
+ return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+ if sensitive {
+ return 0x10
+ }
+ if indexing {
+ return 0x40
+ }
+ return 0
+}
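Putting the encoder half of the package together, a short hedged sketch: fields are written one at a time into the destination writer, and a Sensitive field comes out in the never-indexed form.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)

	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "cookie", Value: "secret", Sensitive: true}, // never indexed
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}
	fmt.Printf("encoded %d fields into %d bytes\n", len(fields), block.Len())
}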
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/hpack/hpack.go b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000..2ea4949
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,533 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+ Err error
+}
+
+func (de DecodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name, Value string
+
+ // Sensitive means that this header field should never be
+ // indexed.
+ Sensitive bool
+}
+
+func (hf HeaderField) String() string {
+ var suffix string
+ if hf.Sensitive {
+ suffix = " (sensitive)"
+ }
+ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+func (hf *HeaderField) size() uint32 {
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+ // "The size of the dynamic table is the sum of the size of
+ // its entries. The size of an entry is the sum of its name's
+ // length in octets (as defined in Section 5.2), its value's
+ // length in octets (see Section 5.2), plus 32. The size of
+ // an entry is calculated using the length of the name and
+ // value without any Huffman encoding applied."
+
+ // This can overflow if somebody makes a large HeaderField
+ // Name and/or Value by hand, but we don't care, because that
+ // won't happen on the wire because the encoding doesn't allow
+ // it.
+ return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ dynTab dynamicTable
+ emit func(f HeaderField)
+
+ emitEnabled bool // whether calls to emit are enabled
+ maxStrLen int // 0 means unlimited
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // not owned; only valid during Write
+
+ // saveBuf is previous data passed to Write which we weren't able
+ // to fully parse before. Unlike buf, we own this data.
+ saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ emitEnabled: true,
+ }
+ d.dynTab.allowedMaxSize = maxDynamicTableSize
+ d.dynTab.setMaxSize(maxDynamicTableSize)
+ return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+ d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+ d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+ d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+ d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+ // ents is the FIFO described at
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+ // The newest (low index) is appended at the end, and items are
+ // evicted from the front.
+ ents []HeaderField
+ size uint32
+ maxSize uint32 // current maxSize
+ allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+ dt.maxSize = v
+ dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+// Then make add increment the size. maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+ dt.ents = append(dt.ents, f)
+ dt.size += f.size()
+ dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+ base := dt.ents // keep base pointer of slice
+ for dt.size > dt.maxSize {
+ dt.size -= dt.ents[0].size()
+ dt.ents = dt.ents[1:]
+ }
+
+ // Shift slice contents down if we evicted things.
+ if len(dt.ents) != len(base) {
+ copy(base, dt.ents)
+ dt.ents = base[:len(dt.ents)]
+ }
+}
+
+// constantTimeStringCompare compares strings a and b in constant
+// time.
+func constantTimeStringCompare(a, b string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ c := byte(0)
+
+ for i := 0; i < len(a); i++ {
+ c |= a[i] ^ b[i]
+ }
+
+ return c == 0
+}
+
+// search searches f in the table. The return value i is 0 if there is
+// no name match. If there is a name or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ l := len(dt.ents)
+ for j := l - 1; j >= 0; j-- {
+ ent := dt.ents[j]
+ if !constantTimeStringCompare(ent.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(l - j)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(ent.Value, f.Value) {
+ continue
+ }
+ i = uint64(l - j)
+ nameValueMatch = true
+ return
+ }
+ return
+}
+
+func (d *Decoder) maxTableIndex() int {
+ return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ if i < 1 {
+ return
+ }
+ if i > uint64(d.maxTableIndex()) {
+ return
+ }
+ if i <= uint64(len(staticTable)) {
+ return staticTable[i-1], true
+ }
+ dents := d.dynTab.ents
+ return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ var hf []HeaderField
+ saveFunc := d.emit
+ defer func() { d.emit = saveFunc }()
+ d.emit = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.Write(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
+
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return DecodingError{errors.New("truncated headers")}
+ }
+ return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err == errNeedMore {
+ // Extra paranoia, making sure saveBuf won't
+ // get too large. All the varint and string
+ // reading code earlier should already catch
+ // overlong things and return ErrStringLength,
+ // but keep this as a last resort.
+ const varIntOverhead = 8 // conservative
+ if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+ return 0, ErrStringLength
+ }
+ d.saveBuf.Write(d.buf)
+ return len(p), nil
+ }
+ if err != nil {
+ break
+ }
+ }
+ return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+func (v indexType) indexed() bool { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+ // 0b01xxxxxx: top two bits are 01
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.buf = buf
+ return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ hf.Name, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+ if d.maxStrLen != 0 {
+ if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+ return ErrStringLength
+ }
+ }
+ if d.emitEnabled {
+ d.emit(hf)
+ }
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ return "", nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ if wantStr {
+ s = string(p[:strLen])
+ }
+ return s, p[strLen:], nil
+ }
+
+ if wantStr {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+ buf.Reset()
+ return "", nil, err
+ }
+ s = buf.String()
+ buf.Reset() // be nice to GC
+ }
+ return s, p[strLen:], nil
+}
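A round-trip sketch tying this decoder back to encode.go: the emit callback passed to NewDecoder receives each field as Write parses it, and Close reports a block that was truncated mid-field.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, f := range []hpack.HeaderField{
		{Name: ":status", Value: "200"},
		{Name: "content-type", Value: "text/html"},
	} {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}

	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s = %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		panic(err)
	}
	if err := dec.Close(); err != nil {
		panic(err)
	}
}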
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/hpack/huffman.go b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000..eb4b1f0
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return 0, err
+ }
+ return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ n := rootHuffmanNode
+ cur, nbits := uint(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ nbits += 8
+ for nbits >= 8 {
+ idx := byte(cur >> (nbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ nbits -= n.codeLen
+ n = rootHuffmanNode
+ } else {
+ nbits -= 8
+ }
+ }
+ }
+ for nbits > 0 {
+ n = n.children[byte(cur<<(8-nbits))]
+ if n.children != nil || n.codeLen > nbits {
+ break
+ }
+ buf.WriteByte(n.sym)
+ nbits -= n.codeLen
+ n = rootHuffmanNode
+ }
+ return nil
+}
+
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ for i, code := range huffmanCodes {
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends the Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned, and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
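A small sketch of the exported Huffman helpers above; per RFC 7541, Appendix C.4.1, "www.example.com" compresses from 15 octets down to 12.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"
	fmt.Println("plain length:  ", len(s))                       // 15
	fmt.Println("huffman length:", hpack.HuffmanEncodeLength(s)) // 12

	enc := hpack.AppendHuffmanString(nil, s)
	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok: ", dec == s)
}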
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/hpack/tables.go b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..b9283a0
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+func pair(name, value string) HeaderField {
+ return HeaderField{Name: name, Value: value}
+}
+
+// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
+var staticTable = [...]HeaderField{
+ pair(":authority", ""), // index 1 (1-based)
+ pair(":method", "GET"),
+ pair(":method", "POST"),
+ pair(":path", "/"),
+ pair(":path", "/index.html"),
+ pair(":scheme", "http"),
+ pair(":scheme", "https"),
+ pair(":status", "200"),
+ pair(":status", "204"),
+ pair(":status", "206"),
+ pair(":status", "304"),
+ pair(":status", "400"),
+ pair(":status", "404"),
+ pair(":status", "500"),
+ pair("accept-charset", ""),
+ pair("accept-encoding", "gzip, deflate"),
+ pair("accept-language", ""),
+ pair("accept-ranges", ""),
+ pair("accept", ""),
+ pair("access-control-allow-origin", ""),
+ pair("age", ""),
+ pair("allow", ""),
+ pair("authorization", ""),
+ pair("cache-control", ""),
+ pair("content-disposition", ""),
+ pair("content-encoding", ""),
+ pair("content-language", ""),
+ pair("content-length", ""),
+ pair("content-location", ""),
+ pair("content-range", ""),
+ pair("content-type", ""),
+ pair("cookie", ""),
+ pair("date", ""),
+ pair("etag", ""),
+ pair("expect", ""),
+ pair("expires", ""),
+ pair("from", ""),
+ pair("host", ""),
+ pair("if-match", ""),
+ pair("if-modified-since", ""),
+ pair("if-none-match", ""),
+ pair("if-range", ""),
+ pair("if-unmodified-since", ""),
+ pair("last-modified", ""),
+ pair("link", ""),
+ pair("location", ""),
+ pair("max-forwards", ""),
+ pair("proxy-authenticate", ""),
+ pair("proxy-authorization", ""),
+ pair("range", ""),
+ pair("referer", ""),
+ pair("refresh", ""),
+ pair("retry-after", ""),
+ pair("server", ""),
+ pair("set-cookie", ""),
+ pair("strict-transport-security", ""),
+ pair("transfer-encoding", ""),
+ pair("user-agent", ""),
+ pair("vary", ""),
+ pair("via", ""),
+ pair("www-authenticate", ""),
+}
+
+var huffmanCodes = [256]uint32{
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+}
+
+var huffmanCodeLen = [256]uint8{
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/http2.go b/src/kube2msb/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000..4c5e11a
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,429 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+package http2
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+var (
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ VerboseLogs = true
+ logFrameWrites = true
+ logFrameReads = true
+ }
+}
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ NextProtoTLS = "h2"
+
+ // http://http2.github.io/http2-spec/#SettingValues
+ initialHeaderTableSize = 4096
+
+ initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+const (
+ stateIdle streamState = iota
+ stateOpen
+ stateHalfClosedLocal
+ stateHalfClosedRemote
+ stateResvLocal
+ stateResvRemote
+ stateClosed
+)
+
+var stateName = [...]string{
+ stateIdle: "Idle",
+ stateOpen: "Open",
+ stateHalfClosedLocal: "HalfClosedLocal",
+ stateHalfClosedRemote: "HalfClosedRemote",
+ stateResvLocal: "ResvLocal",
+ stateResvRemote: "ResvRemote",
+ stateClosed: "Closed",
+}
+
+func (st streamState) String() string {
+ return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+ // ID is which setting is being set.
+ // See http://http2.github.io/http2-spec/#SettingValues
+ ID SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
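A quick illustration of the validation rules above (ENABLE_PUSH must be 0 or 1, MAX_FRAME_SIZE must lie between 16384 and 2^24-1):

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	settings := []http2.Setting{
		{ID: http2.SettingEnablePush, Val: 1},      // valid
		{ID: http2.SettingMaxFrameSize, Val: 1024}, // too small
	}
	for _, s := range settings {
		fmt.Printf("%v valid=%v\n", s, s.Valid() == nil)
	}
}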
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+ errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
+ errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validHeaderFieldName reports whether v is a valid header field name (key).
+// RFC 7230 says:
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') {
+ return false
+ }
+ if !isTokenTable[byte(r)] {
+ return false
+ }
+ }
+ return true
+}
+
+// validHeaderFieldValue reports whether v is a valid header field value.
+//
+// RFC 7230 says:
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func validHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
+ return false
+ }
+ }
+ return true
+}
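
validHeaderFieldName and validHeaderFieldValue are unexported, so code outside this package cannot call them. A rough standalone sketch of the same checks, with the token table collapsed into a character test instead of the full lookup array (all names here are illustrative only):

package main

import "fmt"

// isTokenChar reports whether c may appear in an HTTP token (RFC 7230 tchar),
// restricted to lowercase letters as HTTP/2 requires for field names.
func isTokenChar(c byte) bool {
	if c >= 'a' && c <= 'z' || c >= '0' && c <= '9' {
		return true
	}
	switch c {
	case '!', '#', '$', '%', '&', '\'', '*', '+', '-', '.',
		'^', '_', '`', '|', '~':
		return true
	}
	return false
}

func validName(v string) bool {
	if v == "" {
		return false
	}
	for i := 0; i < len(v); i++ {
		if !isTokenChar(v[i]) {
			return false
		}
	}
	return true
}

func validValue(v string) bool {
	for i := 0; i < len(v); i++ {
		if b := v[i]; (b < ' ' && b != '\t') || b == 0x7f {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validName("content-type"), validValue("text/html"))    // true true
	fmt.Println(validName("Content-Type"), validValue("bad\r\nvalue")) // false false
}
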
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+ for i := 100; i <= 999; i++ {
+ if v := http.StatusText(i); v != "" {
+ httpCodeStringCommon[i] = strconv.Itoa(i)
+ }
+ }
+}
+
+func httpCodeString(code int) string {
+ if s, ok := httpCodeStringCommon[code]; ok {
+ return s
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct as its zero value and initialized in place when
+// needed, rather than constructed separately.
+func (cw *closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+ <-cw
+}
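
gate and closeWaiter are small internal synchronization helpers. A standalone sketch of how the two patterns are used, with the types re-declared here because they are unexported:

package main

import (
	"fmt"
	"time"
)

// gate lets two goroutines hand control back and forth, one step at a time.
type gate chan struct{}

func (g gate) Done() { g <- struct{}{} }
func (g gate) Wait() { <-g }

// closeWaiter goes from open to closed exactly once; any number of
// goroutines can wait for that transition.
type closeWaiter chan struct{}

func (cw *closeWaiter) Init() { *cw = make(chan struct{}) }
func (cw closeWaiter) Close() { close(cw) }
func (cw closeWaiter) Wait()  { <-cw }

func main() {
	g := make(gate)
	var cw closeWaiter
	cw.Init()

	go func() {
		fmt.Println("worker: step done")
		g.Done() // let the main goroutine proceed
		time.Sleep(10 * time.Millisecond)
		cw.Close() // signal shutdown to all waiters
	}()

	g.Wait()  // blocks until the worker calls Done
	cw.Wait() // blocks until the worker calls Close
	fmt.Println("main: worker finished")
}
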
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+ return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ // TODO: pick something better? this is a bit under
+ // (3 x typical 1500 byte MTU) at least.
+ return bufio.NewWriterSize(nil, 4<<10)
+ },
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
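
The comment above explains why the bufio.Writer is pooled and attached lazily: idle connections should not each pin a 4 KB buffer. A standalone sketch of the same pattern under hypothetical names (lazyWriter and pool are not part of this package):

package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, 4<<10) },
}

// lazyWriter attaches a pooled bufio.Writer to w only while data is buffered.
type lazyWriter struct {
	w  io.Writer
	bw *bufio.Writer
}

func (l *lazyWriter) Write(p []byte) (int, error) {
	if l.bw == nil {
		bw := pool.Get().(*bufio.Writer)
		bw.Reset(l.w)
		l.bw = bw
	}
	return l.bw.Write(p)
}

func (l *lazyWriter) Flush() error {
	if l.bw == nil {
		return nil
	}
	err := l.bw.Flush()
	l.bw.Reset(nil) // drop the reference to w before returning to the pool
	pool.Put(l.bw)
	l.bw = nil
	return err
}

func main() {
	lw := &lazyWriter{w: os.Stdout}
	fmt.Fprintln(lw, "buffered until Flush")
	if err := lw.Flush(); err != nil {
		fmt.Fprintln(os.Stderr, "flush:", err)
	}
}
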
+
+func mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+type httpError struct {
+ msg string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.msg }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+type connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/not_go15.go b/src/kube2msb/vendor/golang.org/x/net/http2/not_go15.go
new file mode 100644
index 0000000..d0fa5c8
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/not_go15.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package http2
+
+import "net/http"
+
+func requestCancel(req *http.Request) <-chan struct{} { return nil }
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/not_go16.go b/src/kube2msb/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 0000000..db53c5b
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import "net/http"
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ return nil, errTransportVersion
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/pipe.go b/src/kube2msb/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000..69446e7
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil {
+ return 0, errClosedPipeWrite
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
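
The pipe type above is a mutex-and-condition-variable buffered pipe with a sticky close error. A cut-down standalone sketch of the same idea (bufPipe is an illustrative name, not part of the package); it keeps the lazily initialized sync.Cond and the drain-then-return-error behaviour, and drops the breakErr/donec/readFn machinery:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"sync"
)

// bufPipe: a mutex-protected buffer with a lazily initialized sync.Cond
// and a sticky close error returned once the buffer drains.
type bufPipe struct {
	mu  sync.Mutex
	c   sync.Cond // c.L lazily set to &mu on first use
	b   bytes.Buffer
	err error
}

func (p *bufPipe) lockAndInit() {
	p.mu.Lock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
}

func (p *bufPipe) Read(d []byte) (int, error) {
	p.lockAndInit()
	defer p.mu.Unlock()
	for {
		if p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			return 0, p.err
		}
		p.c.Wait()
	}
}

func (p *bufPipe) Write(d []byte) (int, error) {
	p.lockAndInit()
	defer p.mu.Unlock()
	defer p.c.Signal()
	if p.err != nil {
		return 0, errors.New("write on closed pipe")
	}
	return p.b.Write(d)
}

func (p *bufPipe) CloseWithError(err error) {
	p.lockAndInit()
	defer p.mu.Unlock()
	defer p.c.Signal()
	if p.err == nil {
		p.err = err
	}
}

func main() {
	p := new(bufPipe)
	go func() {
		p.Write([]byte("hello from the writer"))
		p.CloseWithError(io.EOF)
	}()
	out, err := io.ReadAll(p) // ReadAll treats io.EOF as a clean end
	fmt.Printf("%q %v\n", out, err)
}
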
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/server.go b/src/kube2msb/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..6f4c2bb
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2308 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: re-audit GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during graceful shutdown.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable? or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ errStreamClosed = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+ return v
+ }
+ return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return defaultMaxStreams
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+ if conf == nil {
+ conf = new(Server)
+ }
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil {
+ // If they already provided a CipherSuite list, return
+ // an error if it has a bad order or is missing
+ // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
+ const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ haveRequired := false
+ sawBad := false
+ for i, cs := range s.TLSConfig.CipherSuites {
+ if cs == requiredCipher {
+ haveRequired = true
+ }
+ if isBadCipher(cs) {
+ sawBad = true
+ } else if sawBad {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ haveNPN := false
+ for _, p := range s.TLSConfig.NextProtos {
+ if p == NextProtoTLS {
+ haveNPN = true
+ break
+ }
+ }
+ if !haveNPN {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+ // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+ // to switch to "h2".
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ conf.ServeConn(c, &ServeConnOpts{
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+ return nil
+}
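
ConfigureServer is the exported entry point shown above for wiring HTTP/2 into a net/http server. A minimal usage sketch; the certificate and key paths are placeholders:

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// r.Proto is "HTTP/2.0" for requests negotiated over h2.
		fmt.Fprintf(w, "hello over %s\n", r.Proto)
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}

	// Registers the "h2" TLS next-proto handler on srv.
	if err := http2.ConfigureServer(srv, &http2.Server{
		MaxConcurrentStreams: 250,
	}); err != nil {
		log.Fatal(err)
	}

	// cert.pem and key.pem are placeholder paths for a TLS certificate pair.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
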
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ sc := &serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan frameWriteMsg, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ advMaxStreams: s.maxConcurrentStreams(),
+ writeSched: writeScheduler{
+ maxFrameSize: initialMaxFrameSize,
+ },
+ initialWindowSize: initialWindowSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ }
+ sc.flow.add(initialWindowSize)
+ sc.inflow.add(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+ sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, nil)
+ sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen())
+
+ fr := NewFramer(sc.bw, c)
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here again.
+ }
+
+ if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ framer *Framer
+ hpackDecoder *hpack.Decoder
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ testHookCh chan func(int) // code to run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curOpenStreams uint32 // client's number of open streams
+ maxStreamID uint32 // max ever seen
+ streams map[uint32]*stream
+ initialWindowSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ req requestParam // non-zero while reading request headers
+ writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ writeSched writeScheduler
+ inGoAway bool // we've started to or sent GOAWAY
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimerCh <-chan time.Time // nil until used
+ shutdownTimer *time.Timer // nil until used
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+}
+
+func (sc *serverConn) maxHeaderStringLen() int {
+ v := sc.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = http.DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
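
maxHeaderListSize converts net/http's MaxHeaderBytes into the unit used by SETTINGS_MAX_HEADER_LIST_SIZE, which charges 32 bytes of accounting overhead per header field. A small sketch of the same arithmetic using the default net/http limit:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Same padding as the server code above: assume ~10 header fields,
	// each charged 32 bytes of per-field overhead by the HTTP/2 spec.
	const perFieldOverhead = 32
	const typicalHeaders = 10

	n := http.DefaultMaxHeaderBytes // 1 MB unless the Server overrides it
	limit := uint32(n + typicalHeaders*perFieldOverhead)
	fmt.Printf("advertised SETTINGS_MAX_HEADER_LIST_SIZE: %d bytes\n", limit)
}
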
+
+// requestParam is the state of the next request, initialized over
+// potentially several frames: HEADERS + zero or more CONTINUATION
+// frames.
+type requestParam struct {
+ // stream is non-nil if we're reading (HEADER or CONTINUATION)
+ // frames for a request (but not DATA).
+ stream *stream
+ header http.Header
+ method, path string
+ scheme, authority string
+ sawRegularHeader bool // saw a non-pseudo header already
+ invalidHeader bool // an invalid header was seen
+ headerListSize int64 // actually uint32, but easier math this way
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed once the stream transitions to the closed state
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ numTrailerValues int64
+ weight uint8
+ state streamState
+ sentReset bool // only true once detached from streams map
+ gotReset bool // only true once detached from streams map
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://http2.github.io/http2-spec/#rfc.section.5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID <= sc.maxStreamID {
+ return stateClosed, nil
+ }
+ return stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) {
+ sc.serveG.check()
+ if VerboseLogs {
+ sc.vlogf("http2: server decoded %v", f)
+ }
+ switch {
+ case !validHeaderFieldValue(f.Value): // f.Name checked _after_ pseudo check, since ':' is invalid
+ sc.req.invalidHeader = true
+ case strings.HasPrefix(f.Name, ":"):
+ if sc.req.sawRegularHeader {
+ sc.logf("pseudo-header after regular header")
+ sc.req.invalidHeader = true
+ return
+ }
+ var dst *string
+ switch f.Name {
+ case ":method":
+ dst = &sc.req.method
+ case ":path":
+ dst = &sc.req.path
+ case ":scheme":
+ dst = &sc.req.scheme
+ case ":authority":
+ dst = &sc.req.authority
+ default:
+ // 8.1.2.1 Pseudo-Header Fields
+ // "Endpoints MUST treat a request or response
+ // that contains undefined or invalid
+ // pseudo-header fields as malformed (Section
+ // 8.1.2.6)."
+ sc.logf("invalid pseudo-header %q", f.Name)
+ sc.req.invalidHeader = true
+ return
+ }
+ if *dst != "" {
+ sc.logf("duplicate pseudo-header %q sent", f.Name)
+ sc.req.invalidHeader = true
+ return
+ }
+ *dst = f.Value
+ case !validHeaderFieldName(f.Name):
+ sc.req.invalidHeader = true
+ default:
+ sc.req.sawRegularHeader = true
+ sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value)
+ const headerFieldOverhead = 32 // per spec
+ sc.req.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead
+ if sc.req.headerListSize > int64(sc.maxHeaderListSize()) {
+ sc.hpackDecoder.SetEmitEnabled(false)
+ }
+ }
+}
+
+func (st *stream) onNewTrailerField(f hpack.HeaderField) {
+ sc := st.sc
+ sc.serveG.check()
+ if VerboseLogs {
+ sc.vlogf("http2: server decoded trailer %v", f)
+ }
+ switch {
+ case strings.HasPrefix(f.Name, ":"):
+ sc.req.invalidHeader = true
+ return
+ case !validHeaderFieldName(f.Name) || !validHeaderFieldValue(f.Value):
+ sc.req.invalidHeader = true
+ return
+ default:
+ key := sc.canonicalHeader(f.Name)
+ if st.trailer != nil {
+ vv := append(st.trailer[key], f.Value)
+ st.trailer[key] = vv
+
+ // arbitrary; TODO: read spec about header list size limits wrt trailers
+ const tooBig = 1000
+ if len(vv) >= tooBig {
+ sc.hpackDecoder.SetEmitEnabled(false)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ sc.canonHeader[v] = cv
+ return cv
+}
+
+type readFrameResult struct {
+ f Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+ gate := make(gate)
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- readFrameResult{f, err, gate.Done}:
+ case <-sc.doneServing:
+ return
+ }
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if terminalReadFrameError(err) {
+ return
+ }
+ }
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type frameWriteResult struct {
+ wm frameWriteMsg // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+ err := wm.write.writeFrame(sc)
+ sc.wroteFrameCh <- frameWriteResult{wm, err}
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, errClientDisconnected)
+ }
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if testHookOnPanicMu != nil {
+ testHookOnPanicMu.Lock()
+ defer testHookOnPanicMu.Unlock()
+ }
+ if testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
+ sc.writeFrame(frameWriteMsg{
+ write: writeSettings{
+ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+
+ // TODO: more actual settings, notably
+ // SettingInitialWindowSize, but then we also
+ // want to bump up the conn window size the
+ // same amount here right after the settings
+ },
+ })
+ sc.unackedSettings++
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(http.StateActive)
+ sc.setConnState(http.StateIdle)
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.NewTimer(firstSettingsTimeout)
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wm := <-sc.wantWriteFrameCh:
+ sc.writeFrame(wm)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
+ if settingsTimer.C != nil {
+ settingsTimer.Stop()
+ settingsTimer.C = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case <-settingsTimer.C:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case <-sc.shutdownTimerCh:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case fn := <-sc.testHookCh:
+ fn(loopNum)
+ }
+ }
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return errors.New("timeout waiting for client preface")
+ case err := <-errc:
+ if err == nil {
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
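
readPreface races a blocking read of the fixed connection preface against a timer. A standalone sketch of the same pattern over an in-memory net.Pipe connection; the preface string is spelled out here rather than importing the package's ClientPreface constant:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net"
	"time"
)

const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // value of http2.ClientPreface

// readPreface mirrors the server's pattern: read in a goroutine, then race
// the result against a timeout.
func readPreface(c net.Conn, timeout time.Duration) error {
	errc := make(chan error, 1)
	go func() {
		buf := make([]byte, len(clientPreface))
		if _, err := io.ReadFull(c, buf); err != nil {
			errc <- err
		} else if !bytes.Equal(buf, []byte(clientPreface)) {
			errc <- fmt.Errorf("bogus greeting %q", buf)
		} else {
			errc <- nil
		}
	}()
	select {
	case <-time.After(timeout):
		return errors.New("timeout waiting for client preface")
	case err := <-errc:
		return err
	}
}

func main() {
	server, client := net.Pipe()
	go func() {
		client.Write([]byte(clientPreface))
		client.Close()
	}()
	fmt.Println("preface ok:", readPreface(server, time.Second) == nil)
}
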
+
+var errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var writeDataPool = sync.Pool{
+ New: func() interface{} { return new(writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
+ ch := errChanPool.Get().(chan error)
+ writeArg := writeDataPool.Get().(*writeData)
+ *writeArg = writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return errStreamClosed
+ }
+ }
+ errChanPool.Put(ch)
+ if frameWriteDone {
+ writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wm:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+ sc.serveG.check()
+ sc.writeSched.add(wm)
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wm.stream
+ if st != nil {
+ switch st.state {
+ case stateHalfClosedLocal:
+ panic("internal error: attempt to send frame on half-closed-local stream")
+ case stateClosed:
+ if st.sentReset || st.gotReset {
+ // Skip this frame.
+ sc.scheduleFrameWrite()
+ return
+ }
+ panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ go sc.writeFrameAsync(wm)
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in the
+// main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+
+ wm := res.wm
+ st := wm.stream
+
+ closeStream := endsStream(wm.write)
+
+ if _, ok := wm.write.(handlerPanicRST); ok {
+ sc.closeStream(st, errHandlerPanicked)
+ }
+
+ // Reply (if requested) to the blocked ServeHTTP goroutine.
+ if ch := wm.done; ch != nil {
+ select {
+ case ch <- res.err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+ }
+ }
+ wm.write = nil // prevent use (assume it's tainted after wm.done send)
+
+ if closeStream {
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for finishing writing to a ResponseWriter
+ // while still reading data (see possible TODO
+ // at top of this file), we go into closed
+ // state here anyway, after telling the peer
+ // we're hanging up on them.
+ st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+ errCancel := StreamError{st.id, ErrCodeCancel}
+ sc.resetStream(errCancel)
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ }
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and one needs to be sent, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame {
+ return
+ }
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(frameWriteMsg{
+ write: &writeGoAway{
+ maxStreamID: sc.maxStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ return
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+ return
+ }
+ if !sc.inGoAway {
+ if wm, ok := sc.writeSched.take(); ok {
+ sc.startFrameWrite(wm)
+ return
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ return
+ }
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ if code != ErrCodeNo {
+ sc.shutDownIn(250 * time.Millisecond)
+ } else {
+ // TODO: configurable
+ sc.shutDownIn(1 * time.Second)
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.NewTimer(d)
+ sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(frameWriteMsg{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.sentReset = true
+ sc.closeStream(st, se)
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if VerboseLogs {
+ sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case StreamError:
+ sc.resetStream(ev)
+ return true
+ case goAwayFlowError:
+ sc.goAway(ErrCodeFlowControl)
+ return true
+ case ConnectionError:
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.sawFirstSettings = true
+ }
+
+ switch f := f.(type) {
+ case *SettingsFrame:
+ return sc.processSettings(f)
+ case *HeadersFrame:
+ return sc.processHeaders(f)
+ case *ContinuationFrame:
+ return sc.processContinuation(f)
+ case *WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *PingFrame:
+ return sc.processPing(f)
+ case *DataFrame:
+ return sc.processData(f)
+ case *RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *PriorityFrame:
+ return sc.processPriority(f)
+ case *PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ st := sc.streams[f.StreamID]
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return StreamError{f.StreamID, ErrCodeFlowControl}
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st != nil {
+ st.gotReset = true
+ sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+ }
+ return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+ sc.serveG.check()
+ if st.state == stateIdle || st.state == stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = stateClosed
+ sc.curOpenStreams--
+ if sc.curOpenStreams == 0 {
+ sc.setConnState(http.StateIdle)
+ }
+ delete(sc.streams, st.id)
+ if p := st.body; p != nil {
+ p.CloseWithError(err)
+ }
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.forgetStream(st.id)
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return nil
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case SettingHeaderTableSize:
+ sc.headerTableSize = s.Val
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case SettingMaxFrameSize:
+ sc.writeSched.maxFrameSize = s.Val
+ case SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialWindowSize
+ sc.initialWindowSize = int32(val)
+ growth := sc.initialWindowSize - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ }
+ return nil
+}
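
As the comment above notes, a change to SETTINGS_INITIAL_WINDOW_SIZE is applied as a delta to every open stream's flow-control window, which may leave a window negative. A small arithmetic sketch with made-up numbers:

package main

import "fmt"

func main() {
	// Hypothetical values: the connection started with the default 64 KB
	// window and one stream has already consumed 50,000 bytes of it.
	oldInitial := int32(65535)
	streamWindow := oldInitial - 50000 // 15535 bytes still available

	newInitial := int32(16384) // peer shrinks the initial window
	growth := newInitial - oldInitial

	streamWindow += growth // -33616: negative windows are legal mid-flight
	fmt.Println("adjusted stream window:", streamWindow)
}
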
+
+func (sc *serverConn) processData(f *DataFrame) error {
+ sc.serveG.check()
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ id := f.Header().StreamID
+ st, ok := sc.streams[id]
+ if !ok || st.state != stateOpen || st.gotTrailerHeader {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+ data := f.Data()
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if len(data) > 0 {
+ // Check whether the client has flow control quota.
+ if int(st.inflow.available()) < len(data) {
+ return StreamError{id, ErrCodeFlowControl}
+ }
+ st.inflow.take(int32(len(data)))
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+func (sc *serverConn) processHeaders(f *HeadersFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ st := sc.streams[f.Header().StreamID]
+ if st != nil {
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxStreamID || sc.req.stream != nil {
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ if id > sc.maxStreamID {
+ sc.maxStreamID = id
+ }
+ st = &stream{
+ sc: sc,
+ id: id,
+ state: stateOpen,
+ }
+ if f.StreamEnded() {
+ st.state = stateHalfClosedRemote
+ }
+ st.cw.Init()
+
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+ sc.streams[id] = st
+ if f.HasPriority() {
+ adjustStreamPriority(sc.streams, st.id, f.Priority)
+ }
+ sc.curOpenStreams++
+ if sc.curOpenStreams == 1 {
+ sc.setConnState(http.StateActive)
+ }
+ sc.req = requestParam{
+ stream: st,
+ header: make(http.Header),
+ }
+ sc.hpackDecoder.SetEmitFunc(sc.onNewHeaderField)
+ sc.hpackDecoder.SetEmitEnabled(true)
+ return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+func (st *stream) processTrailerHeaders(f *HeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ sc.resetPendingRequest() // we use invalidHeader from it for trailers
+ return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+func (sc *serverConn) processContinuation(f *ContinuationFrame) error {
+ sc.serveG.check()
+ st := sc.streams[f.Header().StreamID]
+ if st.gotTrailerHeader {
+ return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded())
+ }
+ return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {
+ sc.serveG.check()
+ if _, err := sc.hpackDecoder.Write(frag); err != nil {
+ return ConnectionError(ErrCodeCompression)
+ }
+ if !end {
+ return nil
+ }
+ if err := sc.hpackDecoder.Close(); err != nil {
+ return ConnectionError(ErrCodeCompression)
+ }
+ defer sc.resetPendingRequest()
+ if sc.curOpenStreams > sc.advMaxStreams {
+ // "Endpoints MUST NOT exceed the limit set by their
+ // peer. An endpoint that receives a HEADERS frame
+ // that causes their advertised concurrent stream
+ // limit to be exceeded MUST treat this as a stream
+ // error (Section 5.4.2) of type PROTOCOL_ERROR or
+ // REFUSED_STREAM."
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return StreamError{st.id, ErrCodeRefusedStream}
+ }
+
+ rw, req, err := sc.newWriterAndRequest()
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ st.body = req.Body.(*requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if !sc.hpackDecoder.EmitEnabled() {
+ // Their header list was too long. Send a 431 error.
+ handler = handleHeaderListTooLong
+ }
+
+ go sc.runHandler(rw, req, handler)
+ return nil
+}
+
+func (st *stream) processTrailerHeaderBlockFragment(frag []byte, end bool) error {
+ sc := st.sc
+ sc.serveG.check()
+ sc.hpackDecoder.SetEmitFunc(st.onNewTrailerField)
+ if _, err := sc.hpackDecoder.Write(frag); err != nil {
+ return ConnectionError(ErrCodeCompression)
+ }
+ if !end {
+ return nil
+ }
+
+ rp := &sc.req
+ if rp.invalidHeader {
+ return StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+
+ err := sc.hpackDecoder.Close()
+ st.endStream()
+ if err != nil {
+ return ConnectionError(ErrCodeCompression)
+ }
+ return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+ return nil
+}
+
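+// adjustStreamPriority applies a PRIORITY update (weight, parent and
+// exclusivity) to streamID, following the dependency rules in section
+// 5.3.3 of the HTTP/2 spec; unknown stream IDs are ignored.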
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+ st, ok := streams[streamID]
+ if !ok {
+ // TODO: not quite correct (this streamID might
+ // already exist in the dep tree, but be closed), but
+ // close enough for now.
+ return
+ }
+ st.weight = priority.Weight
+ parent := streams[priority.StreamDep] // might be nil
+ if parent == st {
+ // if client tries to set this stream to be the parent of itself
+ // ignore and keep going
+ return
+ }
+
+ // section 5.3.3: If a stream is made dependent on one of its
+ // own dependencies, the formerly dependent stream is first
+ // moved to be dependent on the reprioritized stream's previous
+ // parent. The moved dependency retains its weight.
+ for piter := parent; piter != nil; piter = piter.parent {
+ if piter == st {
+ parent.parent = st.parent
+ break
+ }
+ }
+ st.parent = parent
+ if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+ for _, openStream := range streams {
+ if openStream != st && openStream.parent == st.parent {
+ openStream.parent = st
+ }
+ }
+ }
+}
+
+// resetPendingRequest zeros out all state related to a HEADERS frame
+// and its zero or more CONTINUATION frames sent to start a new
+// request.
+func (sc *serverConn) resetPendingRequest() {
+ sc.serveG.check()
+ sc.req = requestParam{}
+}
+
+func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+ rp := &sc.req
+
+ if rp.invalidHeader {
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+
+ isConnect := rp.method == "CONNECT"
+ if isConnect {
+ if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+ } else if rp.method == "" || rp.path == "" ||
+ (rp.scheme != "https" && rp.scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // "Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+
+ bodyOpen := rp.stream.state == stateOpen
+ if rp.method == "HEAD" && bodyOpen {
+ // HEAD requests can't have bodies
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+ var tlsState *tls.ConnectionState // nil if not scheme https
+
+ if rp.scheme == "https" {
+ tlsState = sc.tlsState
+ }
+ authority := rp.authority
+ if authority == "" {
+ authority = rp.header.Get("Host")
+ }
+ needsContinue := rp.header.Get("Expect") == "100-continue"
+ if needsContinue {
+ rp.header.Del("Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+ rp.header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+ // Setup Trailers
+ var trailer http.Header
+ for _, v := range rp.header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(http.Header)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(rp.header, "Trailer")
+
+ body := &requestBody{
+ conn: sc,
+ stream: rp.stream,
+ needsContinue: needsContinue,
+ }
+ var url_ *url.URL
+ var requestURI string
+ if isConnect {
+ url_ = &url.URL{Host: rp.authority}
+ requestURI = rp.authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.path)
+ if err != nil {
+ return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ }
+ requestURI = rp.path
+ }
+ req := &http.Request{
+ Method: rp.method,
+ URL: url_,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: rp.header,
+ RequestURI: requestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: authority,
+ Body: body,
+ Trailer: trailer,
+ }
+ if bodyOpen {
+ body.pipe = &pipe{
+ b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: garbage
+ }
+
+ if vv, ok := rp.header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
+ }
+
+ rws := responseWriterStatePool.Get().(*responseWriterState)
+ bwSave := rws.bw
+ *rws = responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(chunkWriter{rws})
+ rws.stream = rp.stream
+ rws.req = req
+ rws.body = body
+
+ rw := &responseWriter{rws: rws}
+ return rw, req, nil
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ didPanic := true
+ defer func() {
+ if didPanic {
+ e := recover()
+ // Same as net/http:
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+ // There's a header map (which we don't own), so we have to block on
+ // waiting for this frame to be written so that an http.Flush mid-handler
+ // writes out the correct value of keys before a handler later potentially
+ // mutates it.
+ errc = errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+ st *stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+ sc.serveG.checkNotOn() // NOT on
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != stateHalfClosedRemote && st.state != stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+ sc.serveG.check()
+ // "The legal range for the increment to the flow control
+ // window is 1 to 2^31-1 (2,147,483,647) octets."
+ // A Go Read call on 64-bit machines could in theory read
+ // more than this. Very unlikely, but we handle it here
+ // rather than elsewhere for now.
+ const maxUint31 = 1<<31 - 1
+ for n >= maxUint31 {
+ sc.sendWindowUpdate32(st, maxUint31)
+ n -= maxUint31
+ }
+ sc.sendWindowUpdate32(st, int32(n))
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.serveG.check()
+ if n == 0 {
+ return
+ }
+ if n < 0 {
+ panic("negative update")
+ }
+ var streamID uint32
+ if st != nil {
+ streamID = st.id
+ }
+ sc.writeFrame(frameWriteMsg{
+ write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ stream: st,
+ })
+ var ok bool
+ if st == nil {
+ ok = sc.inflow.add(n)
+ } else {
+ ok = st.inflow.add(n)
+ }
+ if !ok {
+ panic("internal error; sent too many window updates without decrements?")
+ }
+}
+
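+// requestBody is the Handler's Request.Body. Reads drain the stream's
+// flow-controlled pipe and report the bytes consumed back to the serve
+// loop (via noteBodyReadFromHandler) so WINDOW_UPDATE frames can be sent.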
+type requestBody struct {
+ stream *stream
+ conn *serverConn
+ closed bool
+ pipe *pipe // non-nil if we have a HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+ if b.pipe != nil {
+ b.pipe.CloseWithError(errClosedBody)
+ }
+ b.closed = true
+ return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if n > 0 {
+ b.conn.noteBodyReadFromHandler(b.stream, n)
+ }
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+ rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ http.CloseNotifier = (*responseWriter)(nil)
+ _ http.Flusher = (*responseWriter)(nil)
+ _ stringWriter = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+ // immutable within a request:
+ stream *stream
+ req *http.Request
+ body *requestBody // to close at end of request, if DATA frames didn't
+ conn *serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader http.Header // nil until called
+ snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
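+// chunkWriter is the io.Writer behind rws.bw; it forwards every write
+// to responseWriterState.writeChunk.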
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Content-Length", "Trailer":
+ // Forbidden by RFC 2616 14.40.
+ return
+ }
+ if !strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ clen64, err := strconv.ParseInt(clen, 10, 64)
+ if err == nil && clen64 >= 0 {
+ rws.sentContentLen = clen64
+ } else {
+ clen = ""
+ }
+ }
+ if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ if !hasContentType && bodyAllowedForStatus(rws.status) {
+ ctype = http.DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(http.TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ endStream := rws.handlerDone && !rws.hasTrailers()
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && rws.hasTrailers() {
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
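+//
+// For example (illustrative only; "X-My-Trailer" is an arbitrary name),
+// a handler may write:
+//
+//	w.Header().Set("Trailer:X-My-Trailer", "some value")
+//
+// and "X-My-Trailer: some value" is then sent as a trailer once the
+// handler returns.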
+const TrailerPrefix = "Trailer:"
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+ sort.Strings(rws.trailers)
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ go func() {
+ rws.stream.cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if !rws.wroteHeader {
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+ // * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+ // At most one of dataB and dataS is set; the other is its zero value.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !bodyAllowedForStatus(rws.status) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *responseWriter) handlerDone() {
+ rws := w.rws
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ responseWriterStatePool.Put(rws)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
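+// For example, foreachHeaderElement(" gzip, chunked ,", fn) calls
+// fn("gzip") and fn("chunked"); empty elements are skipped.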
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/transport.go b/src/kube2msb/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000..c3a1bdb
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,1750 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ transportDefaultStreamFlow = 4 << 20
+
+ // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+ // a stream-level WINDOW_UPDATE for at a time.
+ transportDefaultStreamMinRefresh = 4 << 10
+
+ defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
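+//
+// A minimal usage sketch (the URL is an arbitrary example; the server
+// must speak HTTP/2 over TLS):
+//
+//	tr := &Transport{}
+//	client := &http.Client{Transport: tr}
+//	res, err := client.Get("https://example.com/")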
+type Transport struct {
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
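+//
+// A typical call site, sketched (tlsCfg stands for whatever TLS config
+// the caller already uses):
+//
+//	t1 := &http.Transport{TLSClientConfig: tlsCfg}
+//	err := http2.ConfigureTransport(t1)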
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ return err
+}
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow flow // peer's conn-level flow control
+ closed bool
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ streams map[uint32]*clientStream // client-initiated
+ nextStreamID uint32
+ bw *bufio.Writer
+ br *bufio.Reader
+ fr *Framer
+ // Settings from peer:
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ initialWindowSize uint32
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+ freeBuf [][]byte
+
+ wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
+ werr error // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+ cc *ClientConn
+ req *http.Request
+ ID uint32
+ resc chan resAndError
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+
+ flow flow // guarded by cc.mu
+ inflow flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+ stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+
+ peerReset chan struct{} // closed on peer reset
+ resetErr error // populated before peerReset is closed
+
+ done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu
+
+ // owned by clientConnReadLoop:
+ pastHeaders bool // got HEADERS w/ END_HEADERS
+ pastTrailers bool // got second HEADERS frame w/ END_HEADERS
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to either cancel a RoundTrip request (using the provided
+// Request.Cancel channel), or for the request to be done (any way it
+// might be removed from the cc.streams map: peer reset, successful
+// completion, TCP connection breakage, etc).
+func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) {
+ if cancel == nil {
+ return
+ }
+ select {
+ case <-cancel:
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-cs.done:
+ }
+}
+
+// checkReset reports any error sent in a RST_STREAM frame by the
+// server.
+func (cs *clientStream) checkReset() error {
+ select {
+ case <-cs.peerReset:
+ return cs.resetErr
+ default:
+ return nil
+ }
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ cc := cs.cc
+ cc.mu.Lock()
+ cs.stopReqBody = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
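+// stickyErrWriter wraps an io.Writer and remembers the first write
+// error; once an error has occurred, every later Write fails with it
+// without touching the underlying writer.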
+type stickyErrWriter struct {
+ w io.Writer
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = sew.w.Write(p)
+ *sew.err = err
+ return
+}
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr takes an authority (a host/IP, or host:port / ip:port)
+// and returns a host:port. The port 443 is added if needed.
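+// For example:
+//
+//	authorityAddr("example.com")      == "example.com:443"
+//	authorityAddr("example.com:8443") == "example.com:8443"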
+func authorityAddr(authority string) (addr string) {
+ if _, _, err := net.SplitHostPort(authority); err == nil {
+ return authority
+ }
+ return net.JoinHostPort(authority, "443")
+}
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if req.URL.Scheme != "https" {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := authorityAddr(req.URL.Host)
+ for {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ res, err := cc.RoundTrip(req)
+ if shouldRetryRequest(req, err) {
+ continue
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(*clientConnPool); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+)
+
+func shouldRetryRequest(req *http.Request, err error) bool {
+ // TODO: retry GET requests (no bodies) more aggressively, if shutdown
+ // before response.
+ return err == errClientConnUnusable
+}
+
+func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.NewClientConn(tconn)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *t.TLSClientConfig
+ }
+ cfg.NextProtos = []string{NextProtoTLS} // TODO: don't override if already in list
+ cfg.ServerName = host
+ return cfg
+}
+
+func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
+ if t.DialTLS != nil {
+ return t.DialTLS
+ }
+ return t.dialTLSDefault
+}
+
+func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ cn, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := cn.Handshake(); err != nil {
+ return nil, err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := cn.VerifyHostname(cfg.ServerName); err != nil {
+ return nil, err
+ }
+ }
+ state := cn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return cn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+ if VerboseLogs {
+ t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
+ }
+ if _, err := c.Write(clientPreface); err != nil {
+ t.vlogf("client preface write error: %v", err)
+ return nil, err
+ }
+
+ cc := &ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
+ streams: make(map[uint32]*clientStream),
+ }
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+ cc.br = bufio.NewReader(c)
+ cc.fr = NewFramer(cc.bw, cc.br)
+
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+ if cs, ok := c.(connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []Setting{
+ Setting{ID: SettingEnablePush, Val: 0},
+ Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+ }
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+ cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ return nil, cc.werr
+ }
+
+ // Read the obligatory SETTINGS frame
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ return nil, fmt.Errorf("expected settings frame, got: %T", f)
+ }
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+
+ sf.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
+ t.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.goAway = f
+}
+
+func (cc *ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ return cc.goAway == nil && !cc.closed &&
+ int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+ cc.nextStreamID < 2147483647
+}
+
+func (cc *ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the smaller of the peer's max frame size and 512KB
+// (somewhat arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+ cc.mu.Lock()
+ size := cc.maxFrameSize
+ if size > maxAllocFrameSize {
+ size = maxAllocFrameSize
+ }
+ for i, buf := range cc.freeBuf {
+ if len(buf) >= int(size) {
+ cc.freeBuf[i] = nil
+ cc.mu.Unlock()
+ return buf[:size]
+ }
+ }
+ cc.mu.Unlock()
+ return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
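+// commaSeparatedTrailers returns the sorted, comma-separated keys of
+// req.Trailer (e.g. "Expires,X-Checksum" for two such keys), or an error
+// if a forbidden key such as Transfer-Encoding is present.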
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need; Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return errors.New("http2: invalid Upgrade request header")
+ }
+ if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
+ return errors.New("http2: invalid Transfer-Encoding request header")
+ }
+ if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
+ return errors.New("http2: invalid Connection request header")
+ }
+ return nil
+}
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := checkConnHeaders(req); err != nil {
+ return nil, err
+ }
+
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ return nil, err
+ }
+ hasTrailers := trailers != ""
+
+ var body io.Reader = req.Body
+ contentLen := req.ContentLength
+ if req.Body != nil && contentLen == 0 {
+ // Test to see if it's actually zero or just unset.
+ var buf [1]byte
+ n, rerr := io.ReadFull(body, buf[:])
+ if rerr != nil && rerr != io.EOF {
+ contentLen = -1
+ body = errorReader{rerr}
+ } else if n == 1 {
+ // Oh, guess there is data in this Body Reader after all.
+ // The ContentLength field just wasn't set.
+ // Stitch the Body back together again, re-attaching our
+ // consumed byte.
+ contentLen = -1
+ body = io.MultiReader(bytes.NewReader(buf[:]), body)
+ } else {
+ // Body is actually empty.
+ body = nil
+ }
+ }
+
+ cc.mu.Lock()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ cc.mu.Unlock()
+ return nil, errClientConnUnusable
+ }
+
+ cs := cc.newStream()
+ cs.req = req
+ hasBody := body != nil
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ cs.requestedGzip = true
+ }
+
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
+ cc.wmu.Lock()
+ endStream := !hasBody && !hasTrailers
+ werr := cc.writeHeaders(cs.ID, endStream, hdrs)
+ cc.wmu.Unlock()
+ cc.mu.Unlock()
+
+ if werr != nil {
+ if hasBody {
+ req.Body.Close() // per RoundTripper contract
+ }
+ cc.forgetStreamID(cs.ID)
+ // Don't bother sending a RST_STREAM (our write already failed;
+ // no need to keep writing)
+ return nil, werr
+ }
+
+ var respHeaderTimer <-chan time.Time
+ var bodyCopyErrc chan error // result of body copy
+ if hasBody {
+ bodyCopyErrc = make(chan error, 1)
+ go func() {
+ bodyCopyErrc <- cs.writeRequestBody(body, req.Body)
+ }()
+ } else {
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+
+ readLoopResCh := cs.resc
+ requestCanceledCh := requestCancel(req)
+ bodyWritten := false
+
+ for {
+ select {
+ case re := <-readLoopResCh:
+ res := re.res
+ if re.err != nil || res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ cs.abortRequestBodyWrite(errStopReqBodyWrite)
+ }
+ if re.err != nil {
+ cc.forgetStreamID(cs.ID)
+ return nil, re.err
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ return res, nil
+ case <-respHeaderTimer:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errTimeout
+ case <-requestCanceledCh:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errRequestCanceled
+ case <-cs.peerReset:
+ // processResetStream already removed the
+ // stream from the streams map; no need for
+ // forgetStreamID.
+ return nil, cs.resetErr
+ case err := <-bodyCopyErrc:
+ if err != nil {
+ return nil, err
+ }
+ bodyWritten = true
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ frameSize := int(cc.maxFrameSize)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > frameSize {
+ chunk = chunk[:frameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
+ cc.bw.Flush()
+ return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+ // abort request body write, but send a stream reset of type CANCEL.
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+ cc := cs.cc
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+ buf := cc.frameScratchBuffer()
+ defer cc.putFrameScratchBuffer(buf)
+
+ defer func() {
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
+ sawEOF = true
+ err = nil
+ } else if err != nil {
+ return err
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or opt-out?
+ // Use some heuristic on the body type? Nagel-like timers?
+ // Based on 'n'? Only last chunk of this for loop, unless flow control
+ // tokens are low? For now, always:
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ cc.wmu.Lock()
+ if !sentEnd {
+ var trls []byte
+ if hasTrailers {
+ cc.mu.Lock()
+ trls = cc.encodeTrailers(req)
+ cc.mu.Unlock()
+ }
+
+ // Avoid forgetting to send an END_STREAM if the encoded
+ // trailers are 0 bytes. Both paths produce an END_STREAM.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ cc.wmu.Unlock()
+
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, errClientConnClosed
+ }
+ if cs.stopReqBody != nil {
+ return 0, cs.stopReqBody
+ }
+ if err := cs.checkReset(); err != nil {
+ return 0, err
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+ if int(take) > maxBytes {
+ take = int32(maxBytes) // can't truncate int; take is int32
+ }
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
+
+type badStringError struct {
+ what string
+ str string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte {
+ cc.hbuf.Reset()
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ cc.writeHeader(":authority", host)
+ cc.writeHeader(":method", req.Method)
+ if req.Method != "CONNECT" {
+ cc.writeHeader(":path", req.URL.RequestURI())
+ cc.writeHeader(":scheme", "https")
+ }
+ if trailers != "" {
+ cc.writeHeader("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ lowKey := strings.ToLower(k)
+ switch lowKey {
+ case "host", "content-length":
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ case "connection", "proxy-connection", "transfer-encoding", "upgrade":
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We deal with these earlier in
+ // RoundTrip, deciding whether they're
+ // error-worthy, but we don't want to mutate
+ // the user's *Request, so at this point we just
+ // skip over them.
+ continue
+ case "user-agent":
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ }
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ cc.writeHeader("accept-encoding", "gzip")
+ }
+ if !didUA {
+ cc.writeHeader("user-agent", defaultUserAgent)
+ }
+ return cc.hbuf.Bytes()
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
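+// For example, shouldSendReqContentLength("POST", 0) is true, while
+// shouldSendReqContentLength("GET", 0) and shouldSendReqContentLength("POST", -1)
+// are false.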
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+ cc.hbuf.Reset()
+ for k, vv := range req.Trailer {
+ // Transfer-Encoding, etc. have already been filtered out at the
+ // start of RoundTrip.
+ lowKey := strings.ToLower(k)
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes()
+}
+
+func (cc *ClientConn) writeHeader(name, value string) {
+ if VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+ res *http.Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) newStream() *clientStream {
+ cs := &clientStream{
+ cc: cc,
+ ID: cc.nextStreamID,
+ resc: make(chan resAndError, 1),
+ peerReset: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.add(transportDefaultStreamFlow)
+ cs.inflow.setConnFlow(&cc.inflow)
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ return cs
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+ cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cs := cc.streams[id]
+ if andRemove && cs != nil && !cc.closed {
+ delete(cc.streams, id)
+ close(cs.done)
+ }
+ return cs
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+ cc *ClientConn
+ activeRes map[uint32]*clientStream // keyed by streamID
+ closeWhenIdle bool
+
+ hdec *hpack.Decoder
+
+ // Fields reset on each HEADERS:
+ nextRes *http.Response
+ sawRegHeader bool // saw non-pseudo header
+ reqMalformed error // non-nil once known to be malformed
+ lastHeaderEndsStream bool
+ headerListSize int64 // actually uint32, but easier math this way
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+ rl := &clientConnReadLoop{
+ cc: cc,
+ activeRes: make(map[uint32]*clientStream),
+ }
+ rl.hdec = hpack.NewDecoder(initialHeaderTableSize, rl.onNewHeaderField)
+
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ defer cc.tconn.Close()
+ defer cc.t.connPool().MarkDead(cc)
+ defer close(cc.readerDone)
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ cc.mu.Lock()
+ for _, cs := range rl.activeRes {
+ cs.bufPipe.CloseWithError(err)
+ }
+ for _, cs := range cc.streams {
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+ close(cs.done)
+ }
+ cc.closed = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+func (rl *clientConnReadLoop) run() error {
+ cc := rl.cc
+ rl.closeWhenIdle = cc.t.disableKeepAlives()
+ gotReply := false // ever saw a reply
+ for {
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ cc.vlogf("Transport readFrame error: (%T) %v", err, err)
+ }
+ if se, ok := err.(StreamError); ok {
+ // TODO: deal with stream errors from the framer.
+ return se
+ } else if err != nil {
+ return err
+ }
+ if VerboseLogs {
+ cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+ }
+ maybeIdle := false // whether frame might transition us to idle
+
+ switch f := f.(type) {
+ case *HeadersFrame:
+ err = rl.processHeaders(f)
+ maybeIdle = true
+ gotReply = true
+ case *ContinuationFrame:
+ err = rl.processContinuation(f)
+ maybeIdle = true
+ case *DataFrame:
+ err = rl.processData(f)
+ maybeIdle = true
+ case *GoAwayFrame:
+ err = rl.processGoAway(f)
+ maybeIdle = true
+ case *RSTStreamFrame:
+ err = rl.processResetStream(f)
+ maybeIdle = true
+ case *SettingsFrame:
+ err = rl.processSettings(f)
+ case *PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ return err
+ }
+ if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func (rl *clientConnReadLoop) processHeaders(f *HeadersFrame) error {
+ rl.sawRegHeader = false
+ rl.reqMalformed = nil
+ rl.lastHeaderEndsStream = f.StreamEnded()
+ rl.headerListSize = 0
+ rl.nextRes = &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: make(http.Header),
+ }
+ rl.hdec.SetEmitEnabled(true)
+ return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded())
+}
+
+func (rl *clientConnReadLoop) processContinuation(f *ContinuationFrame) error {
+ return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded())
+}
+
+func (rl *clientConnReadLoop) processHeaderBlockFragment(frag []byte, streamID uint32, finalFrag bool) error {
+ cc := rl.cc
+ streamEnded := rl.lastHeaderEndsStream
+ cs := cc.streamByID(streamID, streamEnded && finalFrag)
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server was mid-way through replying with its
+ // headers. (The case of a CONTINUATION arriving
+ // without HEADERS would be rejected earlier by the
+ // Framer). So if this was just something we canceled,
+ // ignore it.
+ return nil
+ }
+ if cs.pastHeaders {
+ rl.hdec.SetEmitFunc(func(f hpack.HeaderField) { rl.onNewTrailerField(cs, f) })
+ } else {
+ rl.hdec.SetEmitFunc(rl.onNewHeaderField)
+ }
+ _, err := rl.hdec.Write(frag)
+ if err != nil {
+ return ConnectionError(ErrCodeCompression)
+ }
+ if finalFrag {
+ if err := rl.hdec.Close(); err != nil {
+ return ConnectionError(ErrCodeCompression)
+ }
+ }
+
+ if !finalFrag {
+ return nil
+ }
+
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ // We're dealing with trailers (and specifically the
+ // final frame of headers).
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !streamEnded {
+ // We expect that any header block fragment
+ // frame for trailers with END_HEADERS also
+ // has END_STREAM.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ rl.endStream(cs)
+ return nil
+ }
+
+ if rl.reqMalformed != nil {
+ cs.resc <- resAndError{err: rl.reqMalformed}
+ rl.cc.writeStreamReset(cs.ID, ErrCodeProtocol, rl.reqMalformed)
+ return nil
+ }
+
+ res := rl.nextRes
+
+ if res.StatusCode == 100 {
+ // Just skip 100-continue response headers for now.
+ // TODO: golang.org/issue/13851 for doing it properly.
+ cs.pastHeaders = false // do it all again
+ return nil
+ }
+
+ if !streamEnded || cs.req.Method == "HEAD" {
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+ res.ContentLength = clen64
+ } else {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // safer smuggling-wise to ignore.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // safer smuggling-wise to ignore.
+ }
+ }
+
+ if streamEnded {
+ res.Body = noBody
+ } else {
+ buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+ cs.bufPipe = pipe{b: buf}
+ cs.bytesRemain = res.ContentLength
+ res.Body = transportResponseBody{cs}
+ go cs.awaitRequestCancel(requestCancel(cs.req))
+
+ if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &gzipReader{body: res.Body}
+ }
+ rl.activeRes[cs.ID] = cs
+ }
+
+ cs.resTrailer = &res.Trailer
+ cs.resc <- resAndError{res: res}
+ rl.nextRes = nil // unused now; will be reset next HEADERS frame
+ return nil
+}
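+
+// Illustrative sketch, not part of the upstream package: the response
+// Content-Length handling above, isolated as a standalone helper. It returns
+// -1 when the header is absent, unparseable, or appears more than once,
+// matching the "ignore rather than fail" choice in processHeaderBlockFragment.
+func sketchResponseContentLength(h http.Header) int64 {
+ if clens := h["Content-Length"]; len(clens) == 1 {
+ if n, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+ return n
+ }
+ }
+ return -1
+}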
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.bufPipe.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+type transportResponseBody struct {
+ cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ var connAdd, streamAdd int32
+ // Check the conn-level first, before the stream-level.
+ if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+ connAdd = transportDefaultConnFlow - v
+ cc.inflow.add(connAdd)
+ }
+ if err == nil { // No need to refresh if the stream is over or failed.
+ if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+ streamAdd = transportDefaultStreamFlow - v
+ cs.inflow.add(streamAdd)
+ }
+ }
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
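+
+// Illustrative sketch, not part of the upstream package: the WINDOW_UPDATE
+// refresh decision made in Read above, isolated. Each window is topped back
+// up to its default once it falls below the threshold; a zero return means
+// no update frame is needed at that level.
+func sketchWindowRefresh(connAvail, streamAvail int32, streamStillOpen bool) (connAdd, streamAdd int32) {
+ if connAvail < transportDefaultConnFlow/2 {
+ connAdd = transportDefaultConnFlow - connAvail
+ }
+ if streamStillOpen && streamAvail < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+ streamAdd = transportDefaultStreamFlow - streamAvail
+ }
+ return
+}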
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+ cs := b.cs
+ if cs.bufPipe.Err() != io.EOF {
+ // TODO: write test for this
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ }
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+ return nil
+ }
+ if data := f.Data(); len(data) > 0 {
+ if cs.bufPipe.b == nil {
+ // Data frame after it's already closed?
+ cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if cs.inflow.available() >= int32(len(data)) {
+ cs.inflow.take(int32(len(data)))
+ } else {
+ cc.mu.Unlock()
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.mu.Unlock()
+
+ if _, err := cs.bufPipe.Write(data); err != nil {
+ return err
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ err := io.EOF
+ code := cs.copyTrailers
+ if rl.reqMalformed != nil {
+ err = rl.reqMalformed
+ code = nil
+ }
+ cs.bufPipe.closeWithErrorAndCode(err, code)
+ delete(rl.activeRes, cs.ID)
+ if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
+ rl.closeWhenIdle = true
+ }
+}
+
+func (cs *clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(http.Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return f.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ // TODO: error if this is too large.
+
+ // TODO: adjust flow control of still-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, false)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.cc.streamByID(f.StreamID, true)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ select {
+ case <-cs.peerReset:
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
+ default:
+ err := StreamError{cs.ID, f.ErrCode}
+ cs.resetErr = err
+ close(cs.peerReset)
+ cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl
+ }
+ delete(rl.activeRes, cs.ID)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: do something with err? send it as a debug frame to the peer?
+ // But that's only in GOAWAY. Invent a new frame type? Is there one already?
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
+)
+
+func (rl *clientConnReadLoop) checkHeaderField(f hpack.HeaderField) bool {
+ if rl.reqMalformed != nil {
+ return false
+ }
+
+ const headerFieldOverhead = 32 // per spec
+ rl.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead
+ if max := rl.cc.t.maxHeaderListSize(); max != 0 && rl.headerListSize > int64(max) {
+ rl.hdec.SetEmitEnabled(false)
+ rl.reqMalformed = errResponseHeaderListSize
+ return false
+ }
+
+ if !validHeaderFieldValue(f.Value) {
+ rl.reqMalformed = errInvalidHeaderFieldValue
+ return false
+ }
+
+ isPseudo := strings.HasPrefix(f.Name, ":")
+ if isPseudo {
+ if rl.sawRegHeader {
+ rl.reqMalformed = errors.New("http2: invalid pseudo header after regular header")
+ return false
+ }
+ } else {
+ if !validHeaderFieldName(f.Name) {
+ rl.reqMalformed = errInvalidHeaderFieldName
+ return false
+ }
+ rl.sawRegHeader = true
+ }
+
+ return true
+}
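+
+// Illustrative sketch, not part of the upstream package: the header list size
+// accounting used by checkHeaderField. Per RFC 7540 section 6.5.2, each field
+// costs its name length plus its value length plus a fixed 32-octet overhead.
+func sketchHeaderListSize(fields []hpack.HeaderField) int64 {
+ var size int64
+ for _, f := range fields {
+ size += int64(len(f.Name)) + int64(len(f.Value)) + 32
+ }
+ return size
+}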
+
+// onNewHeaderField runs on the readLoop goroutine whenever a new
+// hpack header field is decoded.
+func (rl *clientConnReadLoop) onNewHeaderField(f hpack.HeaderField) {
+ cc := rl.cc
+ if VerboseLogs {
+ cc.logf("http2: Transport decoded %v", f)
+ }
+
+ if !rl.checkHeaderField(f) {
+ return
+ }
+
+ isPseudo := strings.HasPrefix(f.Name, ":")
+ if isPseudo {
+ switch f.Name {
+ case ":status":
+ code, err := strconv.Atoi(f.Value)
+ if err != nil {
+ rl.reqMalformed = errors.New("http2: invalid :status")
+ return
+ }
+ rl.nextRes.Status = f.Value + " " + http.StatusText(code)
+ rl.nextRes.StatusCode = code
+ default:
+ // "Endpoints MUST NOT generate pseudo-header
+ // fields other than those defined in this
+ // document."
+ rl.reqMalformed = fmt.Errorf("http2: unknown response pseudo header %q", f.Name)
+ }
+ return
+ }
+
+ key := http.CanonicalHeaderKey(f.Name)
+ if key == "Trailer" {
+ t := rl.nextRes.Trailer
+ if t == nil {
+ t = make(http.Header)
+ rl.nextRes.Trailer = t
+ }
+ foreachHeaderElement(f.Value, func(v string) {
+ t[http.CanonicalHeaderKey(v)] = nil
+ })
+ } else {
+ rl.nextRes.Header.Add(key, f.Value)
+ }
+}
+
+func (rl *clientConnReadLoop) onNewTrailerField(cs *clientStream, f hpack.HeaderField) {
+ if VerboseLogs {
+ rl.cc.logf("http2: Transport decoded trailer %v", f)
+ }
+ if !rl.checkHeaderField(f) {
+ return
+ }
+ if strings.HasPrefix(f.Name, ":") {
+ // Pseudo-header fields MUST NOT appear in
+ // trailers. Endpoints MUST treat a request or
+ // response that contains undefined or invalid
+ // pseudo-header fields as malformed.
+ rl.reqMalformed = errPseudoTrailers
+ return
+ }
+
+ key := http.CanonicalHeaderKey(f.Name)
+
+ // The spec says one must predeclare their trailers, but in practice
+ // popular users (which is to say the only user we found) do not, so we
+ // violate the spec and accept all of them.
+ const acceptAllTrailers = true
+ if _, ok := (*cs.resTrailer)[key]; ok || acceptAllTrailers {
+ if cs.trailer == nil {
+ cs.trailer = make(http.Header)
+ }
+ cs.trailer[key] = append(cs.trailer[key], f.Value)
+ }
+}
+
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *Transport) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/write.go b/src/kube2msb/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000..5297a4b
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,263 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("endsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ if p.code != 0 {
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ time.Sleep(50 * time.Millisecond)
+ ctx.CloseConn()
+ }
+ return err
+}
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
+ }
+
+ encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ endHeaders := len(headerBlock) == 0
+ var err error
+ if first {
+ first = false
+ err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
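+
+// Illustrative sketch, not part of the upstream package: the fragmentation
+// performed by writeFrame above. The HPACK block is cut into chunks of at
+// most maxFrag bytes; the first chunk goes out in a HEADERS frame and the
+// rest in CONTINUATION frames, with END_HEADERS set only on the last.
+func sketchSplitHeaderBlock(block []byte, maxFrag int) [][]byte {
+ var frags [][]byte
+ for len(block) > 0 {
+ frag := block
+ if len(frag) > maxFrag {
+ frag = frag[:maxFrag]
+ }
+ block = block[len(frag):]
+ frags = append(frags, frag)
+ }
+ return frags
+}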
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ // TODO: garbage. pool sorters like http1? hot path for 1 key?
+ if keys == nil {
+ keys = make([]string, 0, len(h))
+ for k := range h {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k = lowerHeader(k)
+ if !validHeaderFieldName(k) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !validHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
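+
+// Illustrative usage, not part of the upstream package: feeding a header map
+// through encodeHeaders. "Transfer-Encoding: chunked" is dropped by the
+// filtering above, while the other field is HPACK-encoded into buf. The
+// header values here are made-up examples.
+func exampleEncodeHeaders() []byte {
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ h := http.Header{
+ "Content-Type": {"text/plain"},
+ "Transfer-Encoding": {"chunked"},
+ }
+ encodeHeaders(enc, h, nil)
+ return buf.Bytes()
+}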
diff --git a/src/kube2msb/vendor/golang.org/x/net/http2/writesched.go b/src/kube2msb/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000..c24316c
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,283 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+ // write is the interface value that does the writing, once the
+ // writeScheduler (below) has decided to select this frame
+ // to write. The write functions are all defined in write.go.
+ write writeFramer
+
+ stream *stream // used for prioritization. nil for non-stream frames.
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+ var streamID uint32
+ if wm.stream != nil {
+ streamID = wm.stream.id
+ }
+ var des string
+ if s, ok := wm.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wm.write)
+ }
+ return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+ // zero are frames not associated with a specific stream.
+ // They're sent before any stream-specific frames.
+ zero writeQueue
+
+ // maxFrameSize is the maximum size of a DATA frame
+ // we'll write. Must be non-zero and between 16K-16M.
+ maxFrameSize uint32
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // when a stream is idle, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // canSend is a slice of memory that's reused between frame
+ // scheduling decisions to hold the list of writeQueues (from sq)
+ // which have enough flow control data to send. After canSend is
+ // built, the best is selected.
+ canSend []*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+ if len(q.s) != 0 {
+ panic("queue must be empty")
+ }
+ ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+ ln := len(ws.queuePool)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ q := ws.queuePool[ln-1]
+ ws.queuePool = ws.queuePool[:ln-1]
+ return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+ st := wm.stream
+ if st == nil {
+ ws.zero.push(wm)
+ } else {
+ ws.streamQueue(st.id).push(wm)
+ }
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+ if q, ok := ws.sq[streamID]; ok {
+ return q
+ }
+ if ws.sq == nil {
+ ws.sq = make(map[uint32]*writeQueue)
+ }
+ q := ws.getEmptyQueue()
+ ws.sq[streamID] = q
+ return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+ if ws.maxFrameSize == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+
+ // If there are any frames not associated with streams, prefer those first.
+ // These are usually SETTINGS, etc.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ if len(ws.sq) == 0 {
+ return
+ }
+
+ // Next, prioritize frames on streams that aren't DATA frames (no cost).
+ for id, q := range ws.sq {
+ if q.firstIsNoCost() {
+ return ws.takeFrom(id, q)
+ }
+ }
+
+ // Now, all that remains are DATA frames with non-zero bytes to
+ // send. So pick the best one.
+ if len(ws.canSend) != 0 {
+ panic("should be empty")
+ }
+ for _, q := range ws.sq {
+ if n := ws.streamWritableBytes(q); n > 0 {
+ ws.canSend = append(ws.canSend, q)
+ }
+ }
+ if len(ws.canSend) == 0 {
+ return
+ }
+ defer ws.zeroCanSend()
+
+ // TODO: find the best queue
+ q := ws.canSend[0]
+
+ return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+ for i := range ws.canSend {
+ ws.canSend[i] = nil
+ }
+ ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+ wm := q.head()
+ ret := wm.stream.flow.available() // max we can write
+ if ret == 0 {
+ return 0
+ }
+ if int32(ws.maxFrameSize) < ret {
+ ret = int32(ws.maxFrameSize)
+ }
+ if ret == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+ wd := wm.write.(*writeData)
+ if len(wd.p) < int(ret) {
+ ret = int32(len(wd.p))
+ }
+ return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+ wm = q.head()
+ // If the first item in this queue costs flow control tokens
+ // and we don't have enough, write as much as we can.
+ if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+ allowed := wm.stream.flow.available() // max we can write
+ if allowed == 0 {
+ // No quota available. Caller can try the next stream.
+ return frameWriteMsg{}, false
+ }
+ if int32(ws.maxFrameSize) < allowed {
+ allowed = int32(ws.maxFrameSize)
+ }
+ // TODO: further restrict the allowed size, because even if
+ // the peer says it's okay to write 16MB data frames, we might
+ // want to write smaller ones to properly weight competing
+ // streams' priorities.
+
+ if len(wd.p) > int(allowed) {
+ wm.stream.flow.take(allowed)
+ chunk := wd.p[:allowed]
+ wd.p = wd.p[allowed:]
+ // Make up a new write message of a valid size, rather
+ // than shifting one off the queue.
+ return frameWriteMsg{
+ stream: wm.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: chunk,
+ // even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false:
+ endStream: false,
+ },
+ // our caller is blocking on the final DATA frame, not
+ // these intermediates, so no need to wait:
+ done: nil,
+ }, true
+ }
+ wm.stream.flow.take(int32(len(wd.p)))
+ }
+
+ q.shift()
+ if q.empty() {
+ ws.putEmptyQueue(q)
+ delete(ws.sq, id)
+ }
+ return wm, true
+}
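+
+// Illustrative sketch, not part of the upstream package: the chunk size
+// chosen by takeFrom for a DATA frame is effectively the minimum of the
+// stream's flow-control allowance, the scheduler's maxFrameSize, and the
+// number of bytes left to send.
+func sketchChunkSize(flowAvail, maxFrameSize int32, dataLen int) int32 {
+ n := flowAvail
+ if maxFrameSize < n {
+ n = maxFrameSize
+ }
+ if int32(dataLen) < n {
+ n = int32(dataLen)
+ }
+ return n
+}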
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+ q, ok := ws.sq[id]
+ if !ok {
+ return
+ }
+ delete(ws.sq, id)
+
+ // But keep it for others later.
+ for i := range q.s {
+ q.s[i] = frameWriteMsg{}
+ }
+ q.s = q.s[:0]
+ ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+ s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+ q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wm := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = frameWriteMsg{}
+ q.s = q.s[:len(q.s)-1]
+ return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+ if df, ok := q.s[0].write.(*writeData); ok {
+ return len(df.p) == 0
+ }
+ return true
+}
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/AUTHORS b/src/kube2msb/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000..46aa2b1
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTORS b/src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/LICENSE b/src/kube2msb/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000..d02f24f
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/README.md b/src/kube2msb/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..0d51417
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+ import (
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ newappengine "google.golang.org/appengine"
+ newurlfetch "google.golang.org/appengine/urlfetch"
+
+ "appengine"
+ )
+
+ func handler(w http.ResponseWriter, r *http.Request) {
+ var c appengine.Context = appengine.NewContext(r)
+ c.Infof("Logging a message with the old package")
+
+ var ctx context.Context = newappengine.NewContext(r)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "scope"),
+ Base: &newurlfetch.Transport{Context: ctx},
+ },
+ }
+ client.Get("...")
+ }
+
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/client_appengine.go b/src/kube2msb/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 0000000..4a554cb
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+ internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+ return urlfetch.Client(ctx), nil
+}
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/google/appengine.go b/src/kube2msb/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 0000000..65dc347
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+ scopes := append([]string{}, scope...)
+ sort.Strings(scopes)
+ return &appEngineTokenSource{
+ ctx: ctx,
+ scopes: scopes,
+ key: strings.Join(scopes, " "),
+ }
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var (
+ aeTokensMu sync.Mutex
+ aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while fetching or updating t
+ t *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+ ctx context.Context
+ scopes []string
+ key string // to aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+
+ aeTokensMu.Lock()
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/google/appengine_hook.go b/src/kube2msb/vendor/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 0000000..2f9b154
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+ appengineTokenFunc = appengine.AccessToken
+}
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/google/default.go b/src/kube2msb/vendor/golang.org/x/oauth2/google/default.go
new file mode 100644
index 0000000..78f8089
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/google/default.go
@@ -0,0 +1,154 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+//
+// This client should be used when developing services
+// that run on Google App Engine or Google Compute Engine
+// and use "Application Default Credentials."
+//
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+//
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+ ts, err := DefaultTokenSource(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return oauth2.NewClient(ctx, ts), nil
+}
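+
+// Illustrative usage, not part of the upstream package: obtaining an HTTP
+// client backed by Application Default Credentials. The scope string is just
+// an example; pass whichever scopes your API calls require.
+func exampleDefaultClient(ctx context.Context) (*http.Client, error) {
+ return DefaultClient(ctx, "https://www.googleapis.com/auth/cloud-platform")
+}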
+
+// DefaultTokenSource is a token source that uses
+// "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+// 1. A JSON file whose path is specified by the
+// GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// 2. A JSON file in a location known to the gcloud command-line tool.
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// 3. On Google App Engine it uses the appengine.AccessToken function.
+// 4. On Google Compute Engine, it fetches credentials from the metadata server.
+// (In this final case any provided scopes are ignored.)
+//
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+//
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+ // First, try the environment variable.
+ const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+ if filename := os.Getenv(envVar); filename != "" {
+ ts, err := tokenSourceFromFile(ctx, filename, scope)
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+ }
+ return ts, nil
+ }
+
+ // Second, try a well-known file.
+ filename := wellKnownFile()
+ _, err := os.Stat(filename)
+ if err == nil {
+ ts, err2 := tokenSourceFromFile(ctx, filename, scope)
+ if err2 == nil {
+ return ts, nil
+ }
+ err = err2
+ } else if os.IsNotExist(err) {
+ err = nil // ignore this error
+ }
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+ }
+
+ // Third, if we're on Google App Engine use those credentials.
+ if appengineTokenFunc != nil {
+ return AppEngineTokenSource(ctx, scope...), nil
+ }
+
+ // Fourth, if we're on Google Compute Engine use the metadata server.
+ if metadata.OnGCE() {
+ return ComputeTokenSource(""), nil
+ }
+
+ // None are found; return helpful error.
+ const url = "https://developers.google.com/accounts/docs/application-default-credentials"
+ return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
+
+func wellKnownFile() string {
+ const f = "application_default_credentials.json"
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+ }
+ return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ var d struct {
+ // Common fields
+ Type string
+ ClientID string `json:"client_id"`
+
+ // User Credential fields
+ ClientSecret string `json:"client_secret"`
+ RefreshToken string `json:"refresh_token"`
+
+ // Service Account fields
+ ClientEmail string `json:"client_email"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(b, &d); err != nil {
+ return nil, err
+ }
+ switch d.Type {
+ case "authorized_user":
+ cfg := &oauth2.Config{
+ ClientID: d.ClientID,
+ ClientSecret: d.ClientSecret,
+ Scopes: append([]string{}, scopes...), // copy
+ Endpoint: Endpoint,
+ }
+ tok := &oauth2.Token{RefreshToken: d.RefreshToken}
+ return cfg.TokenSource(ctx, tok), nil
+ case "service_account":
+ cfg := &jwt.Config{
+ Email: d.ClientEmail,
+ PrivateKey: []byte(d.PrivateKey),
+ Scopes: append([]string{}, scopes...), // copy
+ TokenURL: JWTTokenURL,
+ }
+ return cfg.TokenSource(ctx), nil
+ case "":
+ return nil, errors.New("missing 'type' field in credentials")
+ default:
+ return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+ }
+}
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/google/google.go b/src/kube2msb/vendor/golang.org/x/oauth2/google/google.go
new file mode 100644
index 0000000..2077d98
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
+// JSON format and provide the contents of the file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+ type cred struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+ }
+ var j struct {
+ Web *cred `json:"web"`
+ Installed *cred `json:"installed"`
+ }
+ if err := json.Unmarshal(jsonKey, &j); err != nil {
+ return nil, err
+ }
+ var c *cred
+ switch {
+ case j.Web != nil:
+ c = j.Web
+ case j.Installed != nil:
+ c = j.Installed
+ default:
+ return nil, fmt.Errorf("oauth2/google: no credentials found")
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+ }
+ return &oauth2.Config{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: scope,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ },
+ }, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+ var key struct {
+ Email string `json:"client_email"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(jsonKey, &key); err != nil {
+ return nil, err
+ }
+ return &jwt.Config{
+ Email: key.Email,
+ PrivateKey: []byte(key.PrivateKey),
+ Scopes: scope,
+ TokenURL: JWTTokenURL,
+ }, nil
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
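+
+// Illustrative usage, not part of the upstream package: fetching an access
+// token for the default service account of a GCE instance. This only works
+// when running on GCE, as documented for ComputeTokenSource above.
+func exampleComputeToken() (*oauth2.Token, error) {
+ return ComputeTokenSource("").Token()
+}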
+
+type computeSource struct {
+ account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+ if !metadata.OnGCE() {
+ return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+ }
+ acct := cs.account
+ if acct == "" {
+ acct = "default"
+ }
+ tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ if err != nil {
+ return nil, err
+ }
+ var res struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+ }
+ err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+ }
+ return &oauth2.Token{
+ AccessToken: res.AccessToken,
+ TokenType: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ }, nil
+}
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/google/sdk.go b/src/kube2msb/vendor/golang.org/x/oauth2/google/sdk.go
new file mode 100644
index 0000000..01ba0ec
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/google/sdk.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+ Data []struct {
+ Credential struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ TokenExpiry *time.Time `json:"token_expiry"`
+ } `json:"credential"`
+ Key struct {
+ Account string `json:"account"`
+ Scope string `json:"scope"`
+ } `json:"key"`
+ }
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+ conf oauth2.Config
+ initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+ configPath, err := sdkConfigPath()
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+ }
+ credentialsPath := filepath.Join(configPath, "credentials")
+ f, err := os.Open(credentialsPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+ }
+ defer f.Close()
+
+ var c sdkCredentials
+ if err := json.NewDecoder(f).Decode(&c); err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+ }
+ if len(c.Data) == 0 {
+ return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+ }
+ if account == "" {
+ propertiesPath := filepath.Join(configPath, "properties")
+ f, err := os.Open(propertiesPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+ }
+ defer f.Close()
+ ini, err := internal.ParseINI(f)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+ }
+ core, ok := ini["core"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+ }
+ active, ok := core["account"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+ }
+ account = active
+ }
+
+ for _, d := range c.Data {
+ if account == "" || d.Key.Account == account {
+ if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+ return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+ }
+ var expiry time.Time
+ if d.Credential.TokenExpiry != nil {
+ expiry = *d.Credential.TokenExpiry
+ }
+ return &SDKConfig{
+ conf: oauth2.Config{
+ ClientID: d.Credential.ClientID,
+ ClientSecret: d.Credential.ClientSecret,
+ Scopes: strings.Split(d.Key.Scope, " "),
+ Endpoint: Endpoint,
+ RedirectURL: "oob",
+ },
+ initialToken: &oauth2.Token{
+ AccessToken: d.Credential.AccessToken,
+ RefreshToken: d.Credential.RefreshToken,
+ Expiry: expiry,
+ },
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &oauth2.Transport{
+ Source: c.TokenSource(ctx),
+ },
+ }
+}
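+
+// Illustrative usage, not part of the upstream package: building an HTTP
+// client from the gcloud account that is currently active (an empty account
+// name means "use the active one").
+func exampleSDKClient(ctx context.Context) (*http.Client, error) {
+ cfg, err := NewSDKConfig("")
+ if err != nil {
+ return nil, err
+ }
+ return cfg.Client(ctx), nil
+}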
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+ return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+ }
+ homeDir := guessUnixHomeDir()
+ if homeDir == "" {
+ return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+ }
+ return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+ usr, err := user.Current()
+ if err == nil {
+ return usr.HomeDir
+ }
+ return os.Getenv("HOME")
+}
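For orientation, a minimal sketch of how code outside this vendored tree might drive the helper above, assuming the canonical import path golang.org/x/oauth2/google, that `gcloud auth login` has already been run, and an illustrative userinfo endpoint:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name selects whichever account is active in the gcloud properties.
	cfg, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	// Client wraps the cached SDK token in an auto-refreshing *http.Client.
	client := cfg.Client(oauth2.NoContext)
	resp, err := client.Get("https://www.googleapis.com/oauth2/v3/userinfo") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("scopes:", cfg.Scopes(), "status:", resp.Status)
}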
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/internal/oauth2.go b/src/kube2msb/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..dc8ebfc
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
+
+import (
+ "bufio"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": map[string]string{}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
+
+func CondVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
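A sketch of ParseINI's behavior, written as it might appear in a hypothetical test inside the same internal package (the section and key names are illustrative gcloud-style properties):

package internal

import (
	"reflect"
	"strings"
	"testing"
)

func TestParseINISections(t *testing.T) {
	src := "; gcloud-style properties file\n" +
		"root = toplevel\n\n" +
		"[core]\n" +
		"account = alice@example.com\n" +
		"project = demo-project\n"

	got, err := ParseINI(strings.NewReader(src))
	if err != nil {
		t.Fatal(err)
	}
	want := map[string]map[string]string{
		"":     map[string]string{"root": "toplevel"},
		"core": map[string]string{"account": "alice@example.com", "project": "demo-project"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("ParseINI = %v, want %v", got, want)
	}
}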
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/internal/token.go b/src/kube2msb/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000..ea6716c
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,213 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time
+
+ // Raw optionally contains extra metadata from the server
+ // when updating a token.
+ Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+ "https://accounts.google.com/",
+ "https://www.googleapis.com/",
+ "https://github.com/",
+ "https://api.instagram.com/",
+ "https://www.douban.com/",
+ "https://api.dropbox.com/",
+ "https://api.soundcloud.com/",
+ "https://www.linkedin.com/",
+ "https://api.twitch.tv/",
+ "https://oauth.vk.com/",
+ "https://api.odnoklassniki.ru/",
+ "https://connect.stripe.com/",
+ "https://api.pushbullet.com/",
+ "https://oauth.sandbox.trainingpeaks.com/",
+ "https://oauth.trainingpeaks.com/",
+ "https://www.strava.com/oauth/",
+ "https://app.box.com/",
+ "https://test-sandbox.auth.corp.google.com",
+ "https://user.gini.net/",
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it either in the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+ for _, s := range brokenAuthHeaderProviders {
+ if strings.HasPrefix(tokenURL, s) {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false
+ }
+ }
+
+ // Assume the provider implements the spec properly
+ // otherwise. We can add more exceptions as they're
+ // discovered. We will _not_ be adding configurable hooks
+ // to this package to let users select server bugs.
+ return true
+}
+
+func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
+ hc, err := ContextClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ v.Set("client_id", ClientID)
+ bustedAuth := !providerAuthHeaderWorks(TokenURL)
+ if bustedAuth && ClientSecret != "" {
+ v.Set("client_secret", ClientSecret)
+ }
+ req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if !bustedAuth {
+ req.SetBasicAuth(ClientID, ClientSecret)
+ }
+ r, err := hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ Raw: vals,
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ // TODO(jbd): Facebook's OAuth2 implementation is broken and
+ // returns the expires_in field in expires. Remove the fallback to expires
+ // when Facebook fixes their implementation.
+ e = vals.Get("expires")
+ }
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ Raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, nil
+}
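The expirationTime helper above leans on json.Number so that providers returning expires_in as either a JSON number or a numeric string both decode cleanly. A standalone sketch of the same trick, with an illustrative response struct:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// tokenResponse mirrors the approach used by tokenJSON/expirationTime:
// json.Number accepts both a JSON number and a numeric JSON string.
type tokenResponse struct {
	AccessToken string      `json:"access_token"`
	ExpiresIn   json.Number `json:"expires_in"`
}

func main() {
	bodies := []string{
		`{"access_token":"a","expires_in":3600}`,   // most providers: a number
		`{"access_token":"b","expires_in":"3600"}`, // e.g. PayPal: a string
	}
	for _, body := range bodies {
		var tr tokenResponse
		if err := json.Unmarshal([]byte(body), &tr); err != nil {
			log.Fatal(err)
		}
		secs, err := tr.ExpiresIn.Int64()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(tr.AccessToken, "expires in", secs, "seconds")
	}
}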
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/internal/transport.go b/src/kube2msb/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 0000000..521e7b4
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,67 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+ contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
+ for _, fn := range contextClientFuncs {
+ c, err := fn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ return c, nil
+ }
+ }
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc, nil
+ }
+ return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+ hc, err := ContextClient(ctx)
+ // This is a rare error case (somebody using nil on App Engine).
+ if err != nil {
+ return ErrorTransport{err}
+ }
+ return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+ return nil, t.Err
+}
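The HTTPClient key defined here is what lets callers route all token traffic through their own *http.Client; the top-level oauth2 package (later in this diff) re-exports it as oauth2.HTTPClient. A minimal sketch of that usage; the client ID, secret, endpoint URLs, and timeout are placeholders:

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	// Any *http.Client stored under oauth2.HTTPClient is picked up by
	// ContextClient when the package needs to make token requests.
	custom := &http.Client{Timeout: 30 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)

	conf := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",  // placeholder
			TokenURL: "https://provider.example.com/o/oauth2/token", // placeholder
		},
	}
	// Token exchange and refresh traffic now goes through `custom`.
	_ = conf.Client(ctx, &oauth2.Token{AccessToken: "cached-token"})
}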
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/jws/jws.go b/src/kube2msb/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 0000000..396b3fa
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,160 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides encoding and decoding utilities for
+// signed JWS messages.
+package jws
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion
+ Iat int64 `json:"iat"` // the time the assertion was issued.
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. The client keeps setting Prn to be
+ // compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ // This array is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+
+ exp time.Time
+ iat time.Time
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ if c.exp.IsZero() || c.iat.IsZero() {
+ // Reverting time back for machines whose time is not perfectly in sync.
+ // If client machine's time is in the future according
+ // to Google servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second)
+ c.iat = now
+ c.exp = now.Add(time.Hour)
+ }
+
+ c.Exp = c.exp.Unix()
+ c.Iat = c.iat.Unix()
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.PrivateClaims) == 0 {
+ return base64Encode(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims)
+ if err != nil {
+ return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+ // The algorithm used for signature.
+ Algorithm string `json:"alg"`
+
+ // Represents the token type.
+ Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+ // Decode the payload (second) segment to get the claim set.
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ // TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received")
+ }
+ decoded, err := base64Decode(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &ClaimSet{}
+ err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+ return c, err
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ cs, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, cs)
+ h := sha256.New()
+ h.Write([]byte(ss))
+ b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))
+ if err != nil {
+ return "", err
+ }
+ sig := base64Encode(b)
+ return fmt.Sprintf("%s.%s", ss, sig), nil
+}
+
+// base64Encode returns a Base64url encoded version of the input with any
+// trailing "=" padding stripped.
+func base64Encode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// base64Decode decodes the Base64url encoded string
+func base64Decode(s string) ([]byte, error) {
+ // add back missing padding
+ switch len(s) % 4 {
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
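A sketch of signing and re-decoding a claim set with the helpers above; the issuer, scope, and audience values are placeholders, and a freshly generated key stands in for a real service-account key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	// A throwaway RSA key; real callers would load their service-account key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	claims := &jws.ClaimSet{
		Iss:   "service-account@example.com",                // placeholder issuer
		Scope: "https://provider.example.com/auth/readonly", // placeholder scope
		Aud:   "https://provider.example.com/o/oauth2/token",
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}

	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}
	// Decode only inspects the claim-set segment; it does not verify the signature.
	decoded, err := jws.Decode(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("issued:", decoded.Iat, "expires:", decoded.Exp)
}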
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/jwt/jwt.go b/src/kube2msb/vendor/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 0000000..205d23e
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+ // Email is the OAuth client identifier used when communicating with
+ // the configured OAuth provider.
+ Email string
+
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. The provided
+ // private key is used to sign JWT payloads.
+ // PEM containers with a passphrase are not supported.
+ // Use the following command to convert a PKCS 12 file into a PEM.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ PrivateKey []byte
+
+ // Subject is the optional user to impersonate.
+ Subject string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+
+ // TokenURL is the endpoint required to complete the 2-legged JWT flow.
+ TokenURL string
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+ pk, err := internal.ParseKey(js.conf.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ hc := oauth2.NewClient(js.ctx, nil)
+ claimSet := &jws.ClaimSet{
+ Iss: js.conf.Email,
+ Scope: strings.Join(js.conf.Scopes, " "),
+ Aud: js.conf.TokenURL,
+ }
+ if subject := js.conf.Subject; subject != "" {
+ claimSet.Sub = subject
+ // prn is the old name of sub. Keep setting it
+ // to be compatible with legacy OAuth 2.0 providers.
+ claimSet.Prn = subject
+ }
+ payload, err := jws.Encode(defaultHeader, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ resp, err := hc.PostForm(js.conf.TokenURL, v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ token := &oauth2.Token{
+ AccessToken: tokenRes.AccessToken,
+ TokenType: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token = token.WithExtra(raw)
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jws.Decode(v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ return token, nil
+}
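A sketch of the two-legged flow as a caller might drive it; the email, key file path, scope, token URL, and API endpoint below are placeholders, with the key assumed to be a passphrase-free PEM as documented above:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// Placeholder path: a PEM-encoded RSA key for the service account.
	pem, err := ioutil.ReadFile("service-account-key.pem")
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "service-account@example.com", // placeholder issuer
		PrivateKey: pem,
		Scopes:     []string{"https://provider.example.com/auth/readonly"}, // placeholder scope
		TokenURL:   "https://provider.example.com/o/oauth2/token",          // placeholder
	}
	// Client signs a JWT assertion, exchanges it for a token, and refreshes as needed.
	client := conf.Client(oauth2.NoContext)
	resp, err := client.Get("https://api.example.com/resource") // placeholder API
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}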
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/oauth2.go b/src/kube2msb/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 0000000..dfcf238
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,325 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2
+
+import (
+ "bytes"
+ "errors"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+var NoContext = context.TODO()
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL the provider redirects users back to
+ // after they complete the authorization step of the OAuth flow.
+ RedirectURL string
+
+ // Scopes specifies optional requested permissions.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Options.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
+ AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+ return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to the OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+ var buf bytes.Buffer
+ buf.WriteString(c.Endpoint.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {c.ClientID},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ "state": internal.CondVal(state),
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ if strings.Contains(c.Endpoint.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"password"},
+ "username": {username},
+ "password": {password},
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+ tkr := &tokenRefresher{
+ ctx: ctx,
+ conf: c,
+ }
+ if t != nil {
+ tkr.refreshToken = t.RefreshToken
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: tkr,
+ }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx context.Context // used to get HTTP requests
+ conf *Config
+ refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+ if tf.refreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+
+ tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tf.refreshToken},
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if tf.refreshToken != tk.RefreshToken {
+ tf.refreshToken = tk.RefreshToken
+ }
+ return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+ new TokenSource // called when t is expired.
+
+ mu sync.Mutex // guards t
+ t *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token via the wrapped TokenSource and
+// return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+ return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+ t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+ return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ c, err := internal.ContextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: internal.ErrorTransport{err}}
+ }
+ return c
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: internal.ContextTransport(ctx),
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
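Pulling the pieces above together, a hedged sketch of the usual three-legged sequence; all URLs, IDs, and the authorization code are placeholders, and in practice the code arrives on the redirect callback after the state parameter has been validated:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		RedirectURL:  "https://app.example.com/callback",
		Scopes:       []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",
			TokenURL: "https://provider.example.com/o/oauth2/token",
		},
	}

	// 1. Send the user to the consent page; offline access requests a refresh token.
	url := conf.AuthCodeURL("random-state", oauth2.AccessTypeOffline)
	fmt.Println("visit:", url)

	// 2. After the redirect back (and after checking the state value),
	//    exchange the authorization code for a token.
	tok, err := conf.Exchange(oauth2.NoContext, "authorization-code") // placeholder code
	if err != nil {
		log.Fatal(err)
	}

	// 3. The returned client injects and refreshes the token automatically.
	client := conf.Client(oauth2.NoContext, tok)
	_ = client
}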
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/token.go b/src/kube2msb/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..ebbdddb
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if strings.EqualFold(t.TokenType, "bearer") {
+ return "Bearer"
+ }
+ if strings.EqualFold(t.TokenType, "mac") {
+ return "MAC"
+ }
+ if strings.EqualFold(t.TokenType, "basic") {
+ return "Basic"
+ }
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if vals, ok := t.raw.(url.Values); ok {
+ // TODO(jbd): Cast numeric values to int64 or float64.
+ return vals.Get(key)
+ }
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+ return nil
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+ if t == nil {
+ return nil
+ }
+ return &Token{
+ AccessToken: t.AccessToken,
+ TokenType: t.TokenType,
+ RefreshToken: t.RefreshToken,
+ Expiry: t.Expiry,
+ raw: t.Raw,
+ }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token, which is
+// returned along with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ if err != nil {
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
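A short sketch of how a caller might inspect a stored token and attach it to a request by hand; the token value and URL are placeholders, and Transport or Config.Client normally does this automatically:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{
		AccessToken: "cached-access-token", // placeholder
		TokenType:   "bearer",
		Expiry:      time.Now().Add(30 * time.Minute),
	}
	fmt.Println("type:", tok.Type())   // normalized to "Bearer"
	fmt.Println("valid:", tok.Valid()) // true while Expiry is more than expiryDelta in the future

	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	tok.SetAuthHeader(req)
	fmt.Println(req.Header.Get("Authorization"))
}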
diff --git a/src/kube2msb/vendor/golang.org/x/oauth2/transport.go b/src/kube2msb/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000..90db088
--- /dev/null
+++ b/src/kube2msb/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
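For completeness, a minimal sketch wiring Transport directly to a StaticTokenSource; the token value and endpoint are placeholders, and most code would go through Config.Client instead:

package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "api-token"}) // placeholder token
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: src,
			// Base is nil, so http.DefaultTransport carries the request.
		},
	}
	resp, err := client.Get("https://api.example.com/v1/items") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}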
diff --git a/src/kube2msb/vendor/google.golang.org/cloud/LICENSE b/src/kube2msb/vendor/google.golang.org/cloud/LICENSE
new file mode 100644
index 0000000..a4c5efd
--- /dev/null
+++ b/src/kube2msb/vendor/google.golang.org/cloud/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/src/kube2msb/vendor/google.golang.org/cloud/compute/metadata/metadata.go
new file mode 100644
index 0000000..0a70959
--- /dev/null
+++ b/src/kube2msb/vendor/google.golang.org/cloud/compute/metadata/metadata.go
@@ -0,0 +1,382 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+
+ "google.golang.org/cloud/internal"
+)
+
+// metadataIP is the documented metadata server IP address.
+const metadataIP = "169.254.169.254"
+
+type cachedValue struct {
+ k string
+ trim bool
+ mu sync.Mutex
+ v string
+}
+
+var (
+ projID = &cachedValue{k: "project/project-id", trim: true}
+ projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+ instID = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+ metaClient = &http.Client{
+ Transport: &internal.Transport{
+ Base: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ ResponseHeaderTimeout: 2 * time.Second,
+ },
+ },
+ }
+ subscribeClient = &http.Client{
+ Transport: &internal.Transport{
+ Base: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ },
+ },
+ }
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+ return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+ val, _, err := getETag(metaClient, suffix)
+ return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+ // Using a fixed IP makes it very difficult to spoof the metadata service in
+ // a container, which is an important use-case for local testing of cloud
+ // deployments. To enable spoofing of the metadata service, the environment
+ // variable GCE_METADATA_HOST is first inspected to decide where metadata
+ // requests shall go.
+ host := os.Getenv("GCE_METADATA_HOST")
+ if host == "" {
+ // Using 169.254.169.254 instead of "metadata" here because Go
+ // binaries built with the "netgo" tag and without cgo won't
+ // know the search suffix for "metadata" is
+ // ".google.internal", and this IP address is documented as
+ // being stable anyway.
+ host = metadataIP
+ }
+ url := "http://" + host + "/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := client.Do(req)
+ if err != nil {
+ return "", "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return "", "", NotDefinedError(suffix)
+ }
+ if res.StatusCode != 200 {
+ return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", "", err
+ }
+ return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+ s, err = Get(suffix)
+ s = strings.TrimSpace(s)
+ return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+ defer c.mu.Unlock()
+ c.mu.Lock()
+ if c.v != "" {
+ return c.v, nil
+ }
+ if c.trim {
+ v, err = getTrimmed(c.k)
+ } else {
+ v, err = Get(c.k)
+ }
+ if err == nil {
+ c.v = v
+ }
+ return
+}
+
+var onGCE struct {
+ sync.Mutex
+ set bool
+ v bool
+}
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+ defer onGCE.Unlock()
+ onGCE.Lock()
+ if onGCE.set {
+ return onGCE.v
+ }
+ onGCE.set = true
+ onGCE.v = testOnGCE()
+ return onGCE.v
+}
+
+func testOnGCE() bool {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resc := make(chan bool, 2)
+
+ // Try two strategies in parallel.
+ // See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194
+ go func() {
+ res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
+ if err != nil {
+ resc <- false
+ return
+ }
+ defer res.Body.Close()
+ resc <- res.Header.Get("Metadata-Flavor") == "Google"
+ }()
+
+ go func() {
+ addrs, err := net.LookupHost("metadata.google.internal")
+ if err != nil || len(addrs) == 0 {
+ resc <- false
+ return
+ }
+ resc <- strsContains(addrs, metadataIP)
+ }()
+
+ return <-resc
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ const failedSubscribeSleep = time.Second * 5
+
+ // First check to see if the metadata value exists at all.
+ val, lastETag, err := getETag(subscribeClient, suffix)
+ if err != nil {
+ return err
+ }
+
+ if err := fn(val, true); err != nil {
+ return err
+ }
+
+ ok := true
+ if strings.ContainsRune(suffix, '?') {
+ suffix += "&wait_for_change=true&last_etag="
+ } else {
+ suffix += "?wait_for_change=true&last_etag="
+ }
+ for {
+ val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+ if err != nil {
+ if _, deleted := err.(NotDefinedError); !deleted {
+ time.Sleep(failedSubscribeSleep)
+ continue // Retry on other errors.
+ }
+ ok = false
+ }
+ lastETag = etag
+
+ if err := fn(val, ok); err != nil || !ok {
+ return err
+ }
+ }
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+ return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+ var s []string
+ j, err := Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+ return instID.get()
+}
+
+// InstanceName returns the current VM's instance name string.
+func InstanceName() (string, error) {
+ host, err := Hostname()
+ if err != nil {
+ return "", err
+ }
+ return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+ zone, err := getTrimmed("instance/zone")
+ // zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil {
+ return "", err
+ }
+ return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+ j, err := Get(suffix)
+ if err != nil {
+ return nil, err
+ }
+ s := strings.Split(strings.TrimSpace(j), "\n")
+ for i := range s {
+ s[i] = strings.TrimSpace(s[i])
+ }
+ return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+ return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+ return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+ if serviceAccount == "" {
+ serviceAccount = "default"
+ }
+ return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+func strsContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
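
A small consumer sketch of the metadata package added above, assuming the vendored import path google.golang.org/cloud/compute/metadata, a process actually running on GCE, and a hypothetical instance attribute name "my-attr" used only for illustration:

```Go
package main

import (
	"fmt"
	"log"

	"google.golang.org/cloud/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server IP and the metadata DNS name in
	// parallel and caches the result for the life of the process.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; metadata server unavailable")
	}

	// Simple lookups; each returns a NotDefinedError if the key is absent.
	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", proj, "zone:", zone)

	// "my-attr" is a hypothetical attribute used only for illustration.
	if v, err := metadata.InstanceAttributeValue("my-attr"); err == nil {
		fmt.Println("my-attr:", v)
	}
}
```

Subscribe follows the same Get path but long-polls by appending wait_for_change=true and the last ETag to the suffix, calling fn on each change until fn returns an error or the value is deleted.
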
diff --git a/src/kube2msb/vendor/google.golang.org/cloud/internal/cloud.go b/src/kube2msb/vendor/google.golang.org/cloud/internal/cloud.go
new file mode 100644
index 0000000..5942880
--- /dev/null
+++ b/src/kube2msb/vendor/google.golang.org/cloud/internal/cloud.go
@@ -0,0 +1,128 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "golang.org/x/net/context"
+)
+
+type contextKey struct{}
+
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+ if c == nil {
+ panic("nil *http.Client passed to WithContext")
+ }
+ if projID == "" {
+ panic("empty project ID passed to WithContext")
+ }
+ return context.WithValue(parent, contextKey{}, &cloudContext{
+ ProjectID: projID,
+ HTTPClient: c,
+ })
+}
+
+const userAgent = "gcloud-golang/0.1"
+
+type cloudContext struct {
+ ProjectID string
+ HTTPClient *http.Client
+
+ mu sync.Mutex // guards svc
+ svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
+}
+
+// Service returns the result of the fill function if it's never been
+// called before for the given name (which is assumed to be an API
+// service name, like "datastore"). If it has already been cached, the fill
+// func is not run.
+// It's safe for concurrent use by multiple goroutines.
+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
+ return cc(ctx).service(name, fill)
+}
+
+func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.svc == nil {
+ c.svc = make(map[string]interface{})
+ } else if v, ok := c.svc[name]; ok {
+ return v
+ }
+ v := fill(c.HTTPClient)
+ c.svc[name] = v
+ return v
+}
+
+// Transport is an http.RoundTripper that appends
+// Google Cloud client's user-agent to the original
+// request's user-agent header.
+type Transport struct {
+ // Base is the actual http.RoundTripper
+ // requests will use. It must not be nil.
+ Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req = cloneRequest(req)
+ ua := req.Header.Get("User-Agent")
+ if ua == "" {
+ ua = userAgent
+ } else {
+ ua = fmt.Sprintf("%s %s", ua, userAgent)
+ }
+ req.Header.Set("User-Agent", ua)
+ return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return r2
+}
+
+func ProjID(ctx context.Context) string {
+ return cc(ctx).ProjectID
+}
+
+func HTTPClient(ctx context.Context) *http.Client {
+ return cc(ctx).HTTPClient
+}
+
+// cc returns the internal *cloudContext (cc) state for a context.Context.
+// It panics if the user did it wrong.
+func cc(ctx context.Context) *cloudContext {
+ if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
+ return c
+ }
+ panic("invalid context.Context type; it should be created with cloud.NewContext")
+}
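
The package comment above notes that internal is not meant to be imported directly, so the following is only an illustrative sketch of what the context plumbing does; "my-project" is a placeholder project ID. Service follows the same pattern by memoizing one API client per name, and Transport clones each request and appends the gcloud-golang/0.1 user-agent before delegating to Base.

```Go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/cloud/internal"
)

func main() {
	// WithContext stores the project ID and HTTP client under the package's
	// unexported context key; both must be non-empty/non-nil or it panics.
	ctx := internal.WithContext(context.Background(), "my-project", http.DefaultClient)

	fmt.Println(internal.ProjID(ctx)) // my-project
	_ = internal.HTTPClient(ctx)      // the same *http.Client stored above
}
```
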
diff --git a/src/kube2msb/vendor/gopkg.in/inf.v0/LICENSE b/src/kube2msb/vendor/gopkg.in/inf.v0/LICENSE
new file mode 100644
index 0000000..87a5ced
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/inf.v0/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
+Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/gopkg.in/inf.v0/dec.go b/src/kube2msb/vendor/gopkg.in/inf.v0/dec.go
new file mode 100644
index 0000000..d17ad94
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/inf.v0/dec.go
@@ -0,0 +1,615 @@
+// Package inf (type inf.Dec) implements "infinite-precision" decimal
+// arithmetic.
+// "Infinite precision" describes two characteristics: practically unlimited
+// precision for decimal number representation and no support for calculating
+// with any specific fixed precision.
+// (Although there is no practical limit on precision, inf.Dec can only
+// represent finite decimals.)
+//
+// This package is currently in experimental stage and the API may change.
+//
+// This package does NOT support:
+// - rounding to specific precisions (as opposed to specific decimal positions)
+// - the notion of context (each rounding must be explicit)
+// - NaN and Inf values, and distinguishing between positive and negative zero
+// - conversions to and from float32/64 types
+//
+// Features considered for possible addition:
+// + formatting options
+// + Exp method
+// + combined operations such as AddRound/MulAdd etc
+// + exchanging data in decimal32/64/128 formats
+//
+package inf
+
+// TODO:
+// - avoid excessive deep copying (quo and rounders)
+
+import (
+ "fmt"
+ "io"
+ "math/big"
+ "strings"
+)
+
+// A Dec represents a signed arbitrary-precision decimal.
+// It is a combination of a sign, an arbitrary-precision integer coefficient
+// value, and a signed fixed-precision exponent value.
+// The sign and the coefficient value are handled together as a signed value
+// and referred to as the unscaled value.
+// (Positive and negative zero values are not distinguished.)
+// Since the exponent is most commonly non-positive, it is handled in negated
+// form and referred to as scale.
+//
+// The mathematical value of a Dec equals:
+//
+// unscaled * 10**(-scale)
+//
+// Note that different Dec representations may have equal mathematical values.
+//
+// unscaled scale String()
+// -------------------------
+// 0 0 "0"
+// 0 2 "0.00"
+// 0 -2 "0"
+// 1 0 "1"
+// 100 2 "1.00"
+// 10 0 "10"
+// 1 -1 "10"
+//
+// The zero value for a Dec represents the value 0 with scale 0.
+//
+// Operations are typically performed through the *Dec type.
+// The semantics of the assignment operation "=" for "bare" Dec values is
+// undefined and should not be relied on.
+//
+// Methods are typically of the form:
+//
+// func (z *Dec) Op(x, y *Dec) *Dec
+//
+// and implement operations z = x Op y with the result as receiver; if it
+// is one of the operands it may be overwritten (and its memory reused).
+// To enable chaining of operations, the result is also returned. Methods
+// returning a result other than *Dec take one of the operands as the receiver.
+//
+// A "bare" Quo method (quotient / division operation) is not provided, as the
+// result is not always a finite decimal and thus in general cannot be
+// represented as a Dec.
+// Instead, in the common case when rounding is (potentially) necessary,
+// QuoRound should be used with a Scale and a Rounder.
+// QuoExact or QuoRound with RoundExact can be used in the special cases when it
+// is known that the result is always a finite decimal.
+//
+type Dec struct {
+ unscaled big.Int
+ scale Scale
+}
+
+// Scale represents the type used for the scale of a Dec.
+type Scale int32
+
+const scaleSize = 4 // bytes in a Scale value
+
+// Scaler represents a method for obtaining the scale to use for the result of
+// an operation on x and y.
+type scaler interface {
+ Scale(x *Dec, y *Dec) Scale
+}
+
+var bigInt = [...]*big.Int{
+ big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
+ big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
+ big.NewInt(10),
+}
+
+var exp10cache [64]big.Int = func() [64]big.Int {
+ e10, e10i := [64]big.Int{}, bigInt[1]
+ for i := range e10 {
+ e10[i].Set(e10i)
+ e10i = new(big.Int).Mul(e10i, bigInt[10])
+ }
+ return e10
+}()
+
+// NewDec allocates and returns a new Dec set to the given int64 unscaled value
+// and scale.
+func NewDec(unscaled int64, scale Scale) *Dec {
+ return new(Dec).SetUnscaled(unscaled).SetScale(scale)
+}
+
+// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
+// value and scale.
+func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
+ return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
+}
+
+// Scale returns the scale of x.
+func (x *Dec) Scale() Scale {
+ return x.scale
+}
+
+// Unscaled returns the unscaled value of x for u and true for ok when the
+// unscaled value can be represented as int64; otherwise it returns an undefined
+// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
+// checking the validity of the value when the check is known to be redundant.
+func (x *Dec) Unscaled() (u int64, ok bool) {
+ u = x.unscaled.Int64()
+ var i big.Int
+ ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
+ return
+}
+
+// UnscaledBig returns the unscaled value of x as *big.Int.
+func (x *Dec) UnscaledBig() *big.Int {
+ return &x.unscaled
+}
+
+// SetScale sets the scale of z, with the unscaled value unchanged, and returns
+// z.
+// The mathematical value of the Dec changes as if it was multiplied by
+// 10**(oldscale-scale).
+func (z *Dec) SetScale(scale Scale) *Dec {
+ z.scale = scale
+ return z
+}
+
+// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
+// returns z.
+func (z *Dec) SetUnscaled(unscaled int64) *Dec {
+ z.unscaled.SetInt64(unscaled)
+ return z
+}
+
+// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
+// returns z.
+func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
+ z.unscaled.Set(unscaled)
+ return z
+}
+
+// Set sets z to the value of x and returns z.
+// It does nothing if z == x.
+func (z *Dec) Set(x *Dec) *Dec {
+ if z != x {
+ z.SetUnscaledBig(x.UnscaledBig())
+ z.SetScale(x.Scale())
+ }
+ return z
+}
+
+// Sign returns:
+//
+// -1 if x < 0
+// 0 if x == 0
+// +1 if x > 0
+//
+func (x *Dec) Sign() int {
+ return x.UnscaledBig().Sign()
+}
+
+// Neg sets z to -x and returns z.
+func (z *Dec) Neg(x *Dec) *Dec {
+ z.SetScale(x.Scale())
+ z.UnscaledBig().Neg(x.UnscaledBig())
+ return z
+}
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+//
+func (x *Dec) Cmp(y *Dec) int {
+ xx, yy := upscale(x, y)
+ return xx.UnscaledBig().Cmp(yy.UnscaledBig())
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Dec) Abs(x *Dec) *Dec {
+ z.SetScale(x.Scale())
+ z.UnscaledBig().Abs(x.UnscaledBig())
+ return z
+}
+
+// Add sets z to the sum x+y and returns z.
+// The scale of z is the greater of the scales of x and y.
+func (z *Dec) Add(x, y *Dec) *Dec {
+ xx, yy := upscale(x, y)
+ z.SetScale(xx.Scale())
+ z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
+ return z
+}
+
+// Sub sets z to the difference x-y and returns z.
+// The scale of z is the greater of the scales of x and y.
+func (z *Dec) Sub(x, y *Dec) *Dec {
+ xx, yy := upscale(x, y)
+ z.SetScale(xx.Scale())
+ z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
+ return z
+}
+
+// Mul sets z to the product x*y and returns z.
+// The scale of z is the sum of the scales of x and y.
+func (z *Dec) Mul(x, y *Dec) *Dec {
+ z.SetScale(x.Scale() + y.Scale())
+ z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
+ return z
+}
+
+// Round sets z to the value of x rounded to Scale s using Rounder r, and
+// returns z.
+func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
+ return z.QuoRound(x, NewDec(1, 0), s, r)
+}
+
+// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
+// specified scale.
+//
+// If the rounder is RoundExact but the result can not be expressed exactly at
+// the specified scale, QuoRound returns nil, and the value of z is undefined.
+//
+// There is no corresponding Div method; the equivalent can be achieved through
+// the choice of Rounder used.
+//
+func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
+ return z.quo(x, y, sclr{s}, r)
+}
+
+func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
+ scl := s.Scale(x, y)
+ var zzz *Dec
+ if r.UseRemainder() {
+ zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
+ zzz = r.Round(new(Dec), zz, rA, rB)
+ } else {
+ zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
+ zzz = r.Round(new(Dec), zz, nil, nil)
+ }
+ if zzz == nil {
+ return nil
+ }
+ return z.Set(zzz)
+}
+
+// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
+// decimal. Otherwise it returns nil and the value of z is undefined.
+//
+// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
+// calculated so that the remainder will be zero whenever x/y is a finite
+// decimal.
+func (z *Dec) QuoExact(x, y *Dec) *Dec {
+ return z.quo(x, y, scaleQuoExact{}, RoundExact)
+}
+
+// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
+// it sets remNum and remDen to the numerator and denominator of the remainder.
+// It returns z, remNum and remDen.
+//
+// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
+// that is, the results satisfy the following equation:
+//
+// x / y = z + (remNum/remDen) * 10**(-z.Scale())
+//
+// See Rounder for more details about rounding.
+//
+func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
+ remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
+ // difference (required adjustment) compared to "canonical" result scale
+ shift := s - (x.Scale() - y.Scale())
+ // pointers to adjusted unscaled dividend and divisor
+ var ix, iy *big.Int
+ switch {
+ case shift > 0:
+ // increased scale: decimal-shift dividend left
+ ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
+ iy = y.UnscaledBig()
+ case shift < 0:
+ // decreased scale: decimal-shift divisor left
+ ix = x.UnscaledBig()
+ iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
+ default:
+ ix = x.UnscaledBig()
+ iy = y.UnscaledBig()
+ }
+ // save a copy of iy in case it is to be overwritten with the result
+ iy2 := iy
+ if iy == z.UnscaledBig() {
+ iy2 = new(big.Int).Set(iy)
+ }
+ // set scale
+ z.SetScale(s)
+ // set unscaled
+ if useRem {
+ // Int division
+ _, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
+ // set remainder
+ remNum.Set(intr)
+ remDen.Set(iy2)
+ } else {
+ z.UnscaledBig().Quo(ix, iy)
+ }
+ return z, remNum, remDen
+}
+
+type sclr struct{ s Scale }
+
+func (s sclr) Scale(x, y *Dec) Scale {
+ return s.s
+}
+
+type scaleQuoExact struct{}
+
+func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
+ rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
+ f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
+ var f10 Scale
+ if f2 > f5 {
+ f10 = Scale(f2)
+ } else {
+ f10 = Scale(f5)
+ }
+ return x.Scale() - y.Scale() + f10
+}
+
+func factor(n *big.Int, p *big.Int) int {
+ // could be improved for large factors
+ d, f := n, 0
+ for {
+ dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
+ if dm.Sign() == 0 {
+ f++
+ d = dd
+ } else {
+ break
+ }
+ }
+ return f
+}
+
+func factor2(n *big.Int) int {
+ // could be improved for large factors
+ f := 0
+ for ; n.Bit(f) == 0; f++ {
+ }
+ return f
+}
+
+func upscale(a, b *Dec) (*Dec, *Dec) {
+ if a.Scale() == b.Scale() {
+ return a, b
+ }
+ if a.Scale() > b.Scale() {
+ bb := b.rescale(a.Scale())
+ return a, bb
+ }
+ aa := a.rescale(b.Scale())
+ return aa, b
+}
+
+func exp10(x Scale) *big.Int {
+ if int(x) < len(exp10cache) {
+ return &exp10cache[int(x)]
+ }
+ return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
+}
+
+func (x *Dec) rescale(newScale Scale) *Dec {
+ shift := newScale - x.Scale()
+ switch {
+ case shift < 0:
+ e := exp10(-shift)
+ return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
+ case shift > 0:
+ e := exp10(shift)
+ return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
+ }
+ return x
+}
+
+var zeros = []byte("00000000000000000000000000000000" +
+ "00000000000000000000000000000000")
+var lzeros = Scale(len(zeros))
+
+func appendZeros(s []byte, n Scale) []byte {
+ for i := Scale(0); i < n; i += lzeros {
+ if n > i+lzeros {
+ s = append(s, zeros...)
+ } else {
+ s = append(s, zeros[0:n-i]...)
+ }
+ }
+ return s
+}
+
+func (x *Dec) String() string {
+ if x == nil {
+ return "<nil>"
+ }
+ scale := x.Scale()
+ s := []byte(x.UnscaledBig().String())
+ if scale <= 0 {
+ if scale != 0 && x.unscaled.Sign() != 0 {
+ s = appendZeros(s, -scale)
+ }
+ return string(s)
+ }
+ negbit := Scale(-((x.Sign() - 1) / 2))
+ // scale > 0
+ lens := Scale(len(s))
+ if lens-negbit <= scale {
+ ss := make([]byte, 0, scale+2)
+ if negbit == 1 {
+ ss = append(ss, '-')
+ }
+ ss = append(ss, '0', '.')
+ ss = appendZeros(ss, scale-lens+negbit)
+ ss = append(ss, s[negbit:]...)
+ return string(ss)
+ }
+ // lens > scale
+ ss := make([]byte, 0, lens+1)
+ ss = append(ss, s[:lens-scale]...)
+ ss = append(ss, '.')
+ ss = append(ss, s[lens-scale:]...)
+ return string(ss)
+}
+
+// Format is a support routine for fmt.Formatter. It accepts the decimal
+// formats 'd' and 'f', and handles both equivalently.
+// Width, precision, flags and bases 2, 8, 16 are not supported.
+func (x *Dec) Format(s fmt.State, ch rune) {
+ if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
+ fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
+ return
+ }
+ fmt.Fprint(s, x.String())
+}
+
+func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
+ unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
+ dp, dg := -1, -1 // indexes of decimal point, first digit
+loop:
+ for {
+ ch, _, err := r.ReadRune()
+ if err == io.EOF {
+ break loop
+ }
+ if err != nil {
+ return nil, err
+ }
+ switch {
+ case ch == '+' || ch == '-':
+ if len(unscaled) > 0 || dp >= 0 { // must be first character
+ r.UnreadRune()
+ break loop
+ }
+ case ch == '.':
+ if dp >= 0 {
+ r.UnreadRune()
+ break loop
+ }
+ dp = len(unscaled)
+ continue // don't add to unscaled
+ case ch >= '0' && ch <= '9':
+ if dg == -1 {
+ dg = len(unscaled)
+ }
+ default:
+ r.UnreadRune()
+ break loop
+ }
+ unscaled = append(unscaled, byte(ch))
+ }
+ if dg == -1 {
+ return nil, fmt.Errorf("no digits read")
+ }
+ if dp >= 0 {
+ z.SetScale(Scale(len(unscaled) - dp))
+ } else {
+ z.SetScale(0)
+ }
+ _, ok := z.UnscaledBig().SetString(string(unscaled), 10)
+ if !ok {
+ return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
+ }
+ return z, nil
+}
+
+// SetString sets z to the value of s, interpreted as a decimal (base 10),
+// and returns z and a boolean indicating success. The scale of z is the
+// number of digits after the decimal point (including any trailing 0s),
+// or 0 if there is no decimal point. If SetString fails, the value of z
+// is undefined but the returned value is nil.
+func (z *Dec) SetString(s string) (*Dec, bool) {
+ r := strings.NewReader(s)
+ _, err := z.scan(r)
+ if err != nil {
+ return nil, false
+ }
+ _, _, err = r.ReadRune()
+ if err != io.EOF {
+ return nil, false
+ }
+ // err == io.EOF => scan consumed all of s
+ return z, true
+}
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the decimal formats 'd' and 'f', and
+// handles both equivalently. Bases 2, 8, 16 are not supported.
+// The scale of z is the number of digits after the decimal point
+// (including any trailing 0s), or 0 if there is no decimal point.
+func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
+ if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
+ return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
+ }
+ s.SkipSpace()
+ _, err := z.scan(s)
+ return err
+}
+
+// Gob encoding version
+const decGobVersion byte = 1
+
+func scaleBytes(s Scale) []byte {
+ buf := make([]byte, scaleSize)
+ i := scaleSize
+ for j := 0; j < scaleSize; j++ {
+ i--
+ buf[i] = byte(s)
+ s >>= 8
+ }
+ return buf
+}
+
+func scale(b []byte) (s Scale) {
+ for j := 0; j < scaleSize; j++ {
+ s <<= 8
+ s |= Scale(b[j])
+ }
+ return
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Dec) GobEncode() ([]byte, error) {
+ buf, err := x.UnscaledBig().GobEncode()
+ if err != nil {
+ return nil, err
+ }
+ buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
+ return buf, nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Dec) GobDecode(buf []byte) error {
+ if len(buf) == 0 {
+ return fmt.Errorf("Dec.GobDecode: no data")
+ }
+ b := buf[len(buf)-1]
+ if b != decGobVersion {
+ return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
+ }
+ l := len(buf) - scaleSize - 1
+ err := z.UnscaledBig().GobDecode(buf[:l])
+ if err != nil {
+ return err
+ }
+ z.SetScale(scale(buf[l : l+scaleSize]))
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (x *Dec) MarshalText() ([]byte, error) {
+ return []byte(x.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Dec) UnmarshalText(data []byte) error {
+ _, ok := z.SetString(string(data))
+ if !ok {
+ return fmt.Errorf("invalid inf.Dec")
+ }
+ return nil
+}
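
A brief usage sketch of the Dec API defined above; the Rounder value RoundHalfUp comes from rounder.go, added in the next hunk, and the expected results follow from the scale rules documented on Add, Mul and QuoRound:

```Go
package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

func main() {
	// 1.23 is unscaled 123 at scale 2; 0.4 is unscaled 4 at scale 1.
	x := inf.NewDec(123, 2)
	y := inf.NewDec(4, 1)

	sum := new(inf.Dec).Add(x, y)  // scale max(2,1)=2 -> "1.63"
	prod := new(inf.Dec).Mul(x, y) // scale 2+1=3      -> "0.492"
	fmt.Println(sum, prod)

	// Division needs an explicit scale and Rounder, because x/y is not
	// always a finite decimal.
	quo := new(inf.Dec).QuoRound(x, y, 2, inf.RoundHalfUp) // "3.08"
	fmt.Println(quo)

	// SetString: the scale is the number of digits after the decimal point.
	if d, ok := new(inf.Dec).SetString("10.00"); ok {
		u, _ := d.Unscaled()
		fmt.Println(d.Scale(), u) // 2 1000
	}
}
```
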
diff --git a/src/kube2msb/vendor/gopkg.in/inf.v0/rounder.go b/src/kube2msb/vendor/gopkg.in/inf.v0/rounder.go
new file mode 100644
index 0000000..3a97ef5
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/inf.v0/rounder.go
@@ -0,0 +1,145 @@
+package inf
+
+import (
+ "math/big"
+)
+
+// Rounder represents a method for rounding the (possibly infinite decimal)
+// result of a division to a finite Dec. It is used by Dec.Round() and
+// Dec.QuoRound().
+//
+// See the Example for results of using each Rounder with some sample values.
+//
+type Rounder rounder
+
+// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
+// definitions of these rounding modes.
+var (
+ RoundDown Rounder // towards 0
+ RoundUp Rounder // away from 0
+ RoundFloor Rounder // towards -infinity
+ RoundCeil Rounder // towards +infinity
+ RoundHalfDown Rounder // to nearest; towards 0 if same distance
+ RoundHalfUp Rounder // to nearest; away from 0 if same distance
+ RoundHalfEven Rounder // to nearest; even last digit if same distance
+)
+
+// RoundExact is to be used in the case when rounding is not necessary.
+// When used with QuoRound or Round, it returns the result verbatim when it can
+// be expressed exactly with the given precision, and it returns nil otherwise.
+// QuoExact is a shorthand for using QuoRound with RoundExact.
+var RoundExact Rounder
+
+type rounder interface {
+
+ // When UseRemainder() returns true, the Round() method is passed the
+ // remainder of the division, expressed as the numerator and denominator of
+ // a rational.
+ UseRemainder() bool
+
+ // Round sets the rounded value of a quotient to z, and returns z.
+ // quo is rounded down (truncated towards zero) to the scale obtained from
+ // the Scaler in Quo().
+ //
+ // When the remainder is not used, remNum and remDen are nil.
+ // When used, the remainder is normalized between -1 and 1; that is:
+ //
+ // -|remDen| < remNum < |remDen|
+ //
+ // remDen has the same sign as y, and remNum is zero or has the same sign
+ // as x.
+ Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
+}
+
+type rndr struct {
+ useRem bool
+ round func(z, quo *Dec, remNum, remDen *big.Int) *Dec
+}
+
+func (r rndr) UseRemainder() bool {
+ return r.useRem
+}
+
+func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
+ return r.round(z, quo, remNum, remDen)
+}
+
+var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
+
+func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
+ return func(z, q *Dec, rA, rB *big.Int) *Dec {
+ z.Set(q)
+ brA, brB := rA.BitLen(), rB.BitLen()
+ if brA < brB-1 {
+ // brA < brB-1 => |rA| < |rB/2|
+ return z
+ }
+ roundUp := false
+ srA, srB := rA.Sign(), rB.Sign()
+ s := srA * srB
+ if brA == brB-1 {
+ rA2 := new(big.Int).Lsh(rA, 1)
+ if s < 0 {
+ rA2.Neg(rA2)
+ }
+ roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
+ } else {
+ // brA > brB-1 => |rA| > |rB/2|
+ roundUp = true
+ }
+ if roundUp {
+ z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
+ }
+ return z
+ }
+}
+
+func init() {
+ RoundExact = rndr{true,
+ func(z, q *Dec, rA, rB *big.Int) *Dec {
+ if rA.Sign() != 0 {
+ return nil
+ }
+ return z.Set(q)
+ }}
+ RoundDown = rndr{false,
+ func(z, q *Dec, rA, rB *big.Int) *Dec {
+ return z.Set(q)
+ }}
+ RoundUp = rndr{true,
+ func(z, q *Dec, rA, rB *big.Int) *Dec {
+ z.Set(q)
+ if rA.Sign() != 0 {
+ z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
+ }
+ return z
+ }}
+ RoundFloor = rndr{true,
+ func(z, q *Dec, rA, rB *big.Int) *Dec {
+ z.Set(q)
+ if rA.Sign()*rB.Sign() < 0 {
+ z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
+ }
+ return z
+ }}
+ RoundCeil = rndr{true,
+ func(z, q *Dec, rA, rB *big.Int) *Dec {
+ z.Set(q)
+ if rA.Sign()*rB.Sign() > 0 {
+ z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
+ }
+ return z
+ }}
+ RoundHalfDown = rndr{true, roundHalf(
+ func(c int, odd uint) bool {
+ return c > 0
+ })}
+ RoundHalfUp = rndr{true, roundHalf(
+ func(c int, odd uint) bool {
+ return c >= 0
+ })}
+ RoundHalfEven = rndr{true, roundHalf(
+ func(c int, odd uint) bool {
+ return c > 0 || c == 0 && odd == 1
+ })}
+}
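
To make the rounding modes above concrete, here is a short sketch that divides -2.5 by 1.0 at scale 0 under each Rounder; the expected results in the comments follow directly from the mode definitions above.

```Go
package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

func main() {
	x := inf.NewDec(-25, 1) // -2.5
	y := inf.NewDec(10, 1)  //  1.0

	modes := []struct {
		name string
		mode inf.Rounder
	}{
		{"RoundDown", inf.RoundDown},         // towards 0        -> -2
		{"RoundUp", inf.RoundUp},             // away from 0      -> -3
		{"RoundHalfUp", inf.RoundHalfUp},     // ties away from 0 -> -3
		{"RoundHalfEven", inf.RoundHalfEven}, // ties to even     -> -2
		{"RoundFloor", inf.RoundFloor},       // towards -inf     -> -3
		{"RoundCeil", inf.RoundCeil},         // towards +inf     -> -2
	}
	for _, m := range modes {
		fmt.Println(m.name, new(inf.Dec).QuoRound(x, y, 0, m.mode))
	}

	// RoundExact refuses inexact results: -2.5 cannot be represented at
	// scale 0, so QuoRound returns nil.
	fmt.Println(new(inf.Dec).QuoRound(x, y, 0, inf.RoundExact)) // <nil>
}
```
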
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE b/src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000..a68e67f
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,188 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/README.md b/src/kube2msb/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000..7b8bd86
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/apic.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000..95ec014
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
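
Note: the event constructors above are internal helpers; upstream they are driven by the package's encoder (encode.go, not part of this hunk) and ultimately by the public Marshal entry point. The following is a minimal, hedged sketch of that public gopkg.in/yaml.v2 surface, not part of the vendored file; the map contents are illustrative only.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Marshal walks the value via reflection and feeds the resulting
	// STREAM-START/DOCUMENT-START/SCALAR/... events through constructors
	// like the ones above before the emitter (emitterc.go below) writes them out.
	out, err := yaml.Marshal(map[string]interface{}{"name": "kube2msb", "replicas": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
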
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/decode.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000..085cddc
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,683 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ }
+ panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+ mapType reflect.Type
+ terrors []string
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+ d := &decoder{mapType: defaultMapType}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ if d.aliases[n.value] {
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ if !good {
+ d.terror(n, tag, out)
+ }
+ return good
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ out.Set(out.Slice(0, j))
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
+}
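
Note: the parser and reflect-based decoder above are normally reached through the package's public Unmarshal function. The following is a minimal, hedged usage sketch against that standard gopkg.in/yaml.v2 API, not part of the vendored file; the Service type and its fields are illustrative assumptions.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Service is an illustrative type; the yaml struct tags map document keys to
// fields, which mappingStruct resolves through getStructInfo at decode time.
type Service struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports"`
}

func main() {
	data := []byte("name: msb\nports:\n  - 80\n  - 443\n")
	var s Service
	// Unmarshal builds the node tree with the parser above, then decodes it
	// into s via d.unmarshal (scalar/sequence/mapping/mappingStruct).
	if err := yaml.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {Name:msb Ports:[80 443]}
}
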
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/emitterc.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000..2befd55
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+ return false
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceeded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceeded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why the blank-or-end ('z') check? This cannot be the end of the string, as that is the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
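The analysis above only records which scalar styles remain legal for a value (plain, single- or double-quoted, literal/folded block); the final choice happens later in the emitter. As a rough, hedged illustration of the effect — not part of the vendored file, and written against the public gopkg.in/yaml.v2 API rather than these internal functions — the sketch below marshals a few representative strings; the styles noted in the comments are inferred from the rules above and may differ slightly between library versions.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	samples := []map[string]string{
		{"plain": "just words"},     // no indicators: plain style stays allowed
		{"colon": "key: value"},     // ": " looks like a mapping, so plain is ruled out
		{"leading": " padded"},      // leading space rules out plain and block styles
		{"multi": "line1\nline2\n"}, // line breaks rule out plain; a literal block is used
	}
	for _, m := range samples {
		out, err := yaml.Marshal(m)
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}
}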
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
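yaml_emitter_write_block_scalar_hints above decides whether a literal or folded block needs an explicit indentation hint and which chomping indicator to add: '-' when the value has no final break, nothing when it ends in exactly one break, '+' when trailing breaks must be kept. A minimal sketch of the visible effect, again via the public gopkg.in/yaml.v2 API and not part of the vendored file; the hints in the comments are my reading of that function, not captured output.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	cases := []string{
		"line1\nline2",     // no final break: expect "msg: |-"
		"line1\nline2\n",   // exactly one final break: expect "msg: |"
		"line1\nline2\n\n", // extra trailing break: expect "msg: |+"
	}
	for _, s := range cases {
		out, err := yaml.Marshal(map[string]string{"msg": s})
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}
}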
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/encode.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000..84f8499
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,306 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ if m, ok := iface.(Marshaler); ok {
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ } else if m, ok := iface.(encoding.TextMarshaler); ok {
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
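stringv below uses this check to force double quotes around anything that merely looks like a sexagesimal number, so a YAML 1.1 parser cannot reinterpret it. A small hedged sketch of the intended effect through the public Marshal function; the exact rendering is inferred from this code, not taken from documentation.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{"duration": "1:30"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: duration: "1:30" (quoted because isBase60Float matches)
}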
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ rtag, rs := resolve("", s)
+ if rtag == yaml_BINARY_TAG {
+ if tag == "" || tag == yaml_STR_TAG {
+ tag = rtag
+ s = rs.(string)
+ } else if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ } else {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ }
+ if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else if strings.Contains(s, "\n") {
+ style = yaml_LITERAL_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
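The marshal method above checks for this package's Marshaler interface and for encoding.TextMarshaler before falling back on reflection, and structv honours the field options gathered by getStructInfo (omitempty, flow, inline). A hedged sketch of how that surfaces through the public API — not part of the vendored file; the MarshalYAML signature and the struct tag options are the ones defined in this vendored version, while the output hinted at in the comments is indicative only.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Celsius exercises the Marshaler hook checked at the top of (*encoder).marshal.
type Celsius float64

func (c Celsius) MarshalYAML() (interface{}, error) {
	return fmt.Sprintf("%.1fC", float64(c)), nil
}

type Config struct {
	Name  string   `yaml:"name"`
	Tags  []string `yaml:"tags,flow"`       // flow style, e.g. tags: [a, b]
	Notes string   `yaml:"notes,omitempty"` // skipped while empty, see structv
	Temp  Celsius  `yaml:"temp"`
}

func main() {
	out, err := yaml.Marshal(Config{Name: "demo", Tags: []string{"a", "b"}, Temp: 21.5})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}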
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/parserc.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..0a7037a
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+ return false
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
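These two defaults are what make the usual '!!' shorthand resolve into the tag:yaml.org,2002: namespace even when a document declares no %TAG directives of its own. A quick hedged illustration through Unmarshal, separate from the vendored file; the decoded types noted in the comment follow from the standard resolver, not from anything specific to this function.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var doc map[string]interface{}
	// "!!str" expands to tag:yaml.org,2002:str through the default "!!" directive,
	// so the second 123 should decode as a string rather than an int.
	if err := yaml.Unmarshal([]byte("plain: 123\nforced: !!str 123\n"), &doc); err != nil {
		panic(err)
	}
	fmt.Printf("plain=%T forced=%T\n", doc["plain"], doc["forced"]) // expect plain=int forced=string
}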
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
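The grammar and state machine above are what ultimately drive Unmarshal: each document is reduced to a stream of events (STREAM-START, DOCUMENT-START, scalars, sequence/mapping start and end, ALIAS, and so on) that the decoder then walks. A hedged sketch that exercises the anchor/alias path from yaml_parser_parse_node through the public API, separate from the vendored file; the event list in the comment is my reading of the grammar, not library output.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// Roughly: STREAM-START, DOCUMENT-START, MAPPING-START, SCALAR(defaults),
	// MAPPING-START(&base), SCALAR pairs, MAPPING-END, SCALAR(dev), ALIAS(*base),
	// MAPPING-END, DOCUMENT-END, STREAM-END.
	src := []byte("defaults: &base\n  retries: 3\n  timeout: 30\ndev: *base\n")
	var cfg map[string]map[string]int
	if err := yaml.Unmarshal(src, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg["dev"]["retries"]) // expect 3, reached through the alias
}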
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/readerc.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..f450791
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
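In practice this means the reader accepts a UTF-8 byte order mark (and, per the branches above, UTF-16 input) ahead of the first token. A small, hedged check through the public API, separate from the vendored file; that a leading UTF-8 BOM is simply skipped is inferred from this function rather than from the package documentation.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	plain := []byte("answer: 42\n")
	withBOM := append([]byte("\xef\xbb\xbf"), plain...) // same document, prefixed with a UTF-8 BOM

	for _, in := range [][]byte{plain, withBOM} {
		var m map[string]int
		if err := yaml.Unmarshal(in, &m); err != nil {
			panic(err)
		}
		fmt.Println(m["answer"]) // expect 42 both times
	}
}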
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
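+
+// decodeSurrogatePair is an illustrative sketch (not used by the reader) of the
+// surrogate-pair formula applied above: a high surrogate W1 (0xD800-0xDBFF) and
+// a low surrogate W2 (0xDC00-0xDFFF) combine into
+// U = 0x10000 + (W1&0x3FF)<<10 + (W2&0x3FF).
+func decodeSurrogatePair(w1, w2 rune) (rune, bool) {
+	if w1&0xFC00 != 0xD800 || w2&0xFC00 != 0xDC00 {
+		return 0, false // not a valid high/low surrogate pair
+	}
+	// For example, 0xD83D followed by 0xDE00 decodes to U+1F600.
+	return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF), true
+}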
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/resolve.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..93a8632
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,203 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
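+
+// exampleImplicitResolution is an illustrative sketch, not part of the upstream
+// file: the table built above is what lets untagged plain scalars such as
+// "yes", "~" and ".inf" decode into typed Go values. The results noted below
+// are expectations, not assertions.
+func exampleImplicitResolution() error {
+	var v struct {
+		A interface{} `yaml:"a"`
+		B interface{} `yaml:"b"`
+		C interface{} `yaml:"c"`
+	}
+	// Expected: A == true, B == nil, C == +Inf.
+	return Unmarshal([]byte("a: yes\nb: ~\nc: .inf\n"), &v)
+}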
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ return true
+ }
+ return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can look up in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, -int(intv)
+ } else {
+ return yaml_INT_TAG, -intv
+ }
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
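+
+// exampleIntFormats is an illustrative sketch, not part of the upstream file:
+// the integer branch above strips '_' separators and understands the 0b/-0b
+// binary prefixes. The results noted below are expectations, not assertions.
+func exampleIntFormats() error {
+	var v struct {
+		A int `yaml:"a"`
+		B int `yaml:"b"`
+	}
+	// Expected: A == 1000, B == -10.
+	return Unmarshal([]byte("a: 1_000\nb: -0b1010\n"), &v)
+}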
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
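+
+// exampleBinaryMarshal is an illustrative sketch, not part of the upstream
+// file: strings that are not valid UTF-8 are emitted through encodeBase64
+// above as a !!binary scalar. "\xff" is a single invalid byte, so the output
+// is expected (hedged) to contain `!!binary` with the base64 text "/w==".
+func exampleBinaryMarshal() ([]byte, error) {
+	return Marshal(map[string]string{"data": "\xff"})
+}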
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/scannerc.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..2580800
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// There are really only two aspects of scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}',
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
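+
+// normalizeBreak is an illustrative sketch (not used by the scanner) of the
+// normalization performed by read_line above, expressed over a plain byte
+// slice: CR LF, CR, LF and NEL all become a single '\n', while LS/PS are kept
+// as-is. It returns the normalized bytes and the input width consumed.
+func normalizeBreak(b []byte) ([]byte, int) {
+	switch {
+	case len(b) >= 2 && b[0] == '\r' && b[1] == '\n':
+		return []byte{'\n'}, 2
+	case len(b) >= 1 && (b[0] == '\r' || b[0] == '\n'):
+		return []byte{'\n'}, 1
+	case len(b) >= 2 && b[0] == '\xC2' && b[1] == '\x85':
+		return []byte{'\n'}, 2
+	case len(b) >= 3 && b[0] == '\xE2' && b[1] == '\x80' && (b[2] == '\xA8' || b[2] == '\xA9'):
+		return append([]byte(nil), b[:3]...), 3
+	default:
+		return nil, 0 // not a line break
+	}
+}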
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we could not determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
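+
+// exampleReservedIndicator is an illustrative sketch, not part of the upstream
+// file: '@' is one of the reserved indicators excluded above from starting a
+// plain scalar, so a document beginning with it is expected (hedged) to fail
+// with the "cannot start any token" scanner error.
+func exampleReservedIndicator() error {
+	var out interface{}
+	return Unmarshal([]byte("@value\n"), &out)
+}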
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
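+
+// exampleSimpleKeyTooLong is an illustrative sketch, not part of the upstream
+// file: the 1024-character limit enforced above means a simple key that long
+// is expected (hedged) to be rejected with "could not find expected ':'"
+// rather than buffered indefinitely.
+func exampleSimpleKeyTooLong() error {
+	doc := append(bytes.Repeat([]byte("a"), 1100), ": 1\n"...)
+	var out map[string]int
+	return Unmarshal(doc, &out)
+}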
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
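For orientation, the directive scanner above only recognizes %YAML and %TAG; anything else fails with the "found unknown directive name" error seen in the code. The sketch below is a minimal illustration of a %YAML directive flowing through the package's public Unmarshal entry point, assuming the vendored gopkg.in/yaml.v2 import path; the document content and key name are invented for the example.

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        // The leading %YAML 1.1 directive is consumed by the scanner shown above.
        src := []byte("%YAML 1.1\n---\nname: example\n")

        var doc map[string]string
        if err := yaml.Unmarshal(src, &doc); err != nil {
            panic(err)
        }
        fmt.Println(doc["name"]) // expected to print: example
    }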
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
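The %-escape decoder above insists that the escaped octets form exactly one well-formed UTF-8 sequence: a valid leading octet followed by 10xxxxxx continuation octets. The standalone sketch below is not the scanner's code, only an illustration of the same idea using the standard library; decodeTagEscapes is a hypothetical helper name.

    package main

    import (
        "errors"
        "fmt"
        "strconv"
        "unicode/utf8"
    )

    // decodeTagEscapes decodes a run of %XX octets, as they may appear in a
    // YAML tag URI, and verifies that the result is valid UTF-8.
    func decodeTagEscapes(in string) ([]byte, error) {
        var out []byte
        for i := 0; i < len(in); i += 3 {
            if in[i] != '%' || i+3 > len(in) {
                return nil, errors.New("did not find URI escaped octet")
            }
            v, err := strconv.ParseUint(in[i+1:i+3], 16, 8)
            if err != nil {
                return nil, err
            }
            out = append(out, byte(v))
        }
        if !utf8.Valid(out) {
            return nil, errors.New("escapes do not form a valid UTF-8 sequence")
        }
        return out, nil
    }

    func main() {
        b, err := decodeTagEscapes("%C3%A9")
        fmt.Println(string(b), err) // expected: é <nil>
    }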
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
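The indicators handled above ('|' versus '>' for literal versus folded, '+'/'-' for chomping, and an optional indentation digit) are easiest to see from the outside. A minimal sketch, assuming the vendored gopkg.in/yaml.v2 import path; the key names are invented and the expected values follow the folding and chomping rules implemented above.

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        src := []byte("lit: |\n  one\n  two\nfold: >\n  one\n  two\nstrip: |-\n  one\n")

        var out map[string]string
        if err := yaml.Unmarshal(src, &out); err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", out["lit"])   // "one\ntwo\n" - literal keeps breaks, clip chomping keeps one trailing break
        fmt.Printf("%q\n", out["fold"])  // "one two\n"  - folded joins the lines with a space
        fmt.Printf("%q\n", out["strip"]) // "one"        - the '-' indicator strips the trailing break
    }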
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
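The escape table above applies only inside double quotes; single-quoted scalars know just the doubled-quote escape, everything else in them is literal. A small sketch of the difference through the public API (field and key names are invented):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        src := []byte("dq: \"tab\\tnewline\\nu:\\u00e9\"\nsq: 'it''s literal: \\t'\n")

        var v struct {
            DQ string `yaml:"dq"`
            SQ string `yaml:"sq"`
        }
        if err := yaml.Unmarshal(src, &v); err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", v.DQ) // expected "tab\tnewline\nu:é" - backslash escapes are decoded
        fmt.Printf("%q\n", v.SQ) // expected "it's literal: \\t" - only '' is special inside single quotes
    }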
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violate indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
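Worth noting from the loop above: in the block context a ':' only terminates a plain scalar when it is followed by a blank, and '#' starts a comment only after whitespace. A small sketch of the visible effect, assuming the vendored gopkg.in/yaml.v2 import path (the key name is invented):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        src := []byte("url: http://example.com/a:b # trailing comment\n")

        var v map[string]string
        if err := yaml.Unmarshal(src, &v); err != nil {
            panic(err)
        }
        // The ':' inside the value does not end the scalar (no blank follows it),
        // and the comment after the space is dropped by the scanner.
        fmt.Println(v["url"]) // expected: http://example.com/a:b
    }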
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/sorter.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000..5958822
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
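This comparator is used to order the keys of an ordinary Go map when one is marshalled: numeric and boolean keys sort ahead of strings, and digit runs inside strings compare as numbers rather than character by character. A small sketch of the visible effect, assuming the vendored gopkg.in/yaml.v2 import path; the key names are invented.

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        out, err := yaml.Marshal(map[string]int{"item10": 10, "item2": 2, "item1": 1})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // Expected order (natural, not lexical):
        //   item1: 1
        //   item2: 2
        //   item10: 10
    }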
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/writerc.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000..190362f
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0x03)) // top two bits of the low surrogate's 10-bit payload
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
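The recoding loop above hand-rolls UTF-8 decoding and UTF-16 surrogate construction. For orientation only, the sketch below produces the same kind of UTF-16-LE byte stream with the standard library; it is not code used by the emitter, whose Marshal path in this package emits UTF-8, so the UTF-16 branches are rarely exercised.

    package main

    import (
        "encoding/binary"
        "fmt"
        "unicode/utf16"
    )

    func main() {
        // "é" stays a single 16-bit unit; the emoji needs a surrogate pair.
        s := "é\U0001F600"

        units := utf16.Encode([]rune(s))
        out := make([]byte, 2*len(units))
        for i, u := range units {
            binary.LittleEndian.PutUint16(out[2*i:], u) // yaml_UTF16LE_ENCODING byte order
        }
        fmt.Printf("% X\n", out) // expected: E9 00 3D D8 00 DE
    }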
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/yaml.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000..36d6b88
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
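MapSlice is the escape hatch for key order: a regular Go map has its keys sorted by the comparator in sorter.go, while a MapSlice is emitted exactly in the order given. A brief sketch, assuming the vendored gopkg.in/yaml.v2 import path (the keys are invented):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        ms := yaml.MapSlice{
            {Key: "zebra", Value: 1},
            {Key: "alpha", Value: 2},
        }
        out, err := yaml.Marshal(ms)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // expected, in the original order:
        //   zebra: 1
        //   alpha: 2
    }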
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
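One behavior worth calling out from the doc comment above: a type mismatch does not abort decoding. A hedged sketch of what that looks like in practice (field and key names invented):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var cfg struct {
            Port int    `yaml:"port"`
            Name string `yaml:"name"`
        }
        err := yaml.Unmarshal([]byte("port: not-a-number\nname: web\n"), &cfg)

        // err is expected to be a *yaml.TypeError describing the failure on
        // "port", while "name" is still decoded.
        fmt.Printf("%T: %v\n", err, err)
        fmt.Println(cfg.Name) // expected: web
    }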
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
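The flag list in the Marshal documentation above (omitempty, flow, inline) is easiest to read off an example. A minimal sketch with invented type and field names, assuming the vendored gopkg.in/yaml.v2 import path:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type Service struct {
        Name    string `yaml:"name"`
        Ports   []int  `yaml:"ports,flow"`
        Comment string `yaml:"comment,omitempty"`
    }

    func main() {
        out, err := yaml.Marshal(Service{Name: "web", Ports: []int{80, 443}})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // expected:
        //   name: web
        //   ports: [80, 443]
        // ("comment" is omitted because it holds the zero value)
    }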
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
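Most of the branching above exists for the ",inline" flag: an inlined struct contributes its fields at the outer level, and at most one inlined map with string keys collects whatever keys no field claims. A sketch under those assumptions (type, field, and key names are invented):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type Meta struct {
        Name string `yaml:"name"`
    }

    type Object struct {
        Meta  `yaml:",inline"`                   // Meta's fields appear at the top level
        Kind  string            `yaml:"kind"`
        Extra map[string]string `yaml:",inline"` // unmatched keys end up here
    }

    func main() {
        var obj Object
        err := yaml.Unmarshal([]byte("name: demo\nkind: Service\nowner: team-a\n"), &obj)
        if err != nil {
            panic(err)
        }
        fmt.Println(obj.Name, obj.Kind, obj.Extra["owner"]) // expected: demo Service team-a
    }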
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/yamlh.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000..d60a6b6
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG       = "tag:yaml.org,2002:map"       // The tag !!map is used to denote mappings.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. It should write no more than len(buffer) bytes into buffer and
+// return the number of bytes written.
+//
+// On success the handler returns the byte count and a nil error; on failure
+// it returns a non-nil error. At the end of the input it should return
+// io.EOF.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
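
For illustration only (not part of this patch): a handler of the type above can wrap any io.Reader. The adapter name below is hypothetical; only yaml_read_handler_t and yaml_parser_t come from the code in this file, and the sketch assumes "io" is imported.

    // readerHandler returns a read handler that fills the parser's buffer from
    // an io.Reader; the reader's io.EOF signals the end of the YAML input.
    func readerHandler(r io.Reader) yaml_read_handler_t {
    	return func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
    		return r.Read(buffer)
    	}
    }
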
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+	// The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+	token_available bool        // Does the tokens queue contain a token ready for dequeueing?
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. It should write all bytes of buffer to the
+// output.
+//
+// On success the handler returns nil; on failure it returns a non-nil error.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
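
Likewise, a minimal sketch (the adapter name is hypothetical, and "io" is assumed to be imported) of a write handler that forwards emitted bytes to an io.Writer:

    // writerHandler adapts an io.Writer to the yaml_write_handler_t signature.
    func writerHandler(w io.Writer) yaml_write_handler_t {
    	return func(emitter *yaml_emitter_t, buffer []byte) error {
    		_, err := w.Write(buffer)
    		return err
    	}
    }
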
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/src/kube2msb/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/src/kube2msb/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/src/kube2msb/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
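
As a quick sanity check of width (illustrative only; the wrapper function is hypothetical), the leading byte of a UTF-8 sequence determines its length, matching the bit masks above:

    func widthExamples() []int {
    	return []int{
    		width('a'),  // 1: 0xxxxxxx, plain ASCII
    		width(0xC3), // 2: 110xxxxx, e.g. the first byte of 'é'
    		width(0xE2), // 3: 1110xxxx, e.g. the first byte of '€'
    		width(0xF0), // 4: 11110xxx, e.g. the first byte of many emoji
    		width(0x80), // 0: a continuation byte, never a sequence start
    	}
    }
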
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/LICENSE b/src/kube2msb/vendor/k8s.io/kubernetes/LICENSE
new file mode 100644
index 0000000..00b2401
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 The Kubernetes Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/OWNERS b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/OWNERS
new file mode 100644
index 0000000..d28472e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/OWNERS
@@ -0,0 +1,6 @@
+assignees:
+ - bgrant0607
+ - erictune
+ - lavalamp
+ - smarterclayton
+ - thockin
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/context.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/context.go
new file mode 100644
index 0000000..096144b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/context.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ stderrs "errors"
+ "time"
+
+ "golang.org/x/net/context"
+ "k8s.io/kubernetes/pkg/auth/user"
+)
+
+// Context carries values across API boundaries.
+// This context matches the context.Context interface
+// (https://blog.golang.org/context), for the purposes
+// of passing the api.Context through to the storage tier.
+// TODO: Determine the extent that this abstraction+interface
+// is used by the api, and whether we can remove.
+type Context interface {
+ // Value returns the value associated with key or nil if none.
+ Value(key interface{}) interface{}
+
+ // Deadline returns the time when this Context will be canceled, if any.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that is closed when this Context is canceled
+ // or times out.
+ Done() <-chan struct{}
+
+ // Err indicates why this context was canceled, after the Done channel
+ // is closed.
+ Err() error
+}
+
+// The key type is unexported to prevent collisions
+type key int
+
+// namespaceKey is the context key for the request namespace.
+const namespaceKey key = 0
+
+// userKey is the context key for the request user.
+const userKey key = 1
+
+// NewContext instantiates a base context object for request flows.
+func NewContext() Context {
+ return context.TODO()
+}
+
+// NewDefaultContext instantiates a base context object for request flows in the default namespace
+func NewDefaultContext() Context {
+ return WithNamespace(NewContext(), NamespaceDefault)
+}
+
+// WithValue returns a copy of parent in which the value associated with key is val.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ internalCtx, ok := parent.(context.Context)
+ if !ok {
+ panic(stderrs.New("Invalid context type"))
+ }
+ return context.WithValue(internalCtx, key, val)
+}
+
+// WithNamespace returns a copy of parent in which the namespace value is set
+func WithNamespace(parent Context, namespace string) Context {
+ return WithValue(parent, namespaceKey, namespace)
+}
+
+// NamespaceFrom returns the value of the namespace key on the ctx
+func NamespaceFrom(ctx Context) (string, bool) {
+ namespace, ok := ctx.Value(namespaceKey).(string)
+ return namespace, ok
+}
+
+// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none
+func NamespaceValue(ctx Context) string {
+ namespace, _ := NamespaceFrom(ctx)
+ return namespace
+}
+
+// ValidNamespace returns false if the namespace on the context differs from the resource. If the resource has no namespace, it is set to the value in the context.
+func ValidNamespace(ctx Context, resource *ObjectMeta) bool {
+ ns, ok := NamespaceFrom(ctx)
+ if len(resource.Namespace) == 0 {
+ resource.Namespace = ns
+ }
+ return ns == resource.Namespace && ok
+}
+
+// WithNamespaceDefaultIfNone returns a context whose namespace is the default if and only if the parent context has no namespace value
+func WithNamespaceDefaultIfNone(parent Context) Context {
+ namespace, ok := NamespaceFrom(parent)
+ if !ok || len(namespace) == 0 {
+ return WithNamespace(parent, NamespaceDefault)
+ }
+ return parent
+}
+
+// WithUser returns a copy of parent in which the user value is set
+func WithUser(parent Context, user user.Info) Context {
+ return WithValue(parent, userKey, user)
+}
+
+// UserFrom returns the value of the user key on the ctx
+func UserFrom(ctx Context) (user.Info, bool) {
+ user, ok := ctx.Value(userKey).(user.Info)
+ return user, ok
+}
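
A minimal usage sketch of the helpers above (illustrative; the "kube-system" value is arbitrary): attach a namespace to a base context and read it back.

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/api"
    )

    func main() {
    	// Derive a request context carrying a namespace, then retrieve it.
    	ctx := api.WithNamespace(api.NewContext(), "kube-system")
    	if ns, ok := api.NamespaceFrom(ctx); ok {
    		fmt.Println(ns) // prints "kube-system"
    	}
    }
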
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/conversion.go
new file mode 100644
index 0000000..07585d8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/conversion.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+func init() {
+ Scheme.AddDefaultingFuncs(
+ func(obj *ListOptions) {
+ if obj.LabelSelector == nil {
+ obj.LabelSelector = labels.Everything()
+ }
+ if obj.FieldSelector == nil {
+ obj.FieldSelector = fields.Everything()
+ }
+ },
+ )
+ Scheme.AddConversionFuncs(
+ Convert_unversioned_TypeMeta_To_unversioned_TypeMeta,
+ Convert_unversioned_ListMeta_To_unversioned_ListMeta,
+ Convert_intstr_IntOrString_To_intstr_IntOrString,
+ Convert_unversioned_Time_To_unversioned_Time,
+ Convert_Slice_string_To_unversioned_Time,
+ Convert_string_To_labels_Selector,
+ Convert_string_To_fields_Selector,
+ Convert_Pointer_bool_To_bool,
+ Convert_bool_To_Pointer_bool,
+ Convert_Pointer_string_To_string,
+ Convert_string_To_Pointer_string,
+ Convert_labels_Selector_To_string,
+ Convert_fields_Selector_To_string,
+ Convert_resource_Quantity_To_resource_Quantity,
+ )
+}
+
+func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error {
+ if *in == nil {
+ *out = ""
+ return nil
+ }
+ *out = **in
+ return nil
+}
+
+func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error {
+ if in == nil {
+ stringVar := ""
+ *out = &stringVar
+ return nil
+ }
+ *out = in
+ return nil
+}
+
+func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error {
+ if *in == nil {
+ *out = false
+ return nil
+ }
+ *out = **in
+ return nil
+}
+
+func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error {
+ if in == nil {
+ boolVar := false
+ *out = &boolVar
+ return nil
+ }
+ *out = in
+ return nil
+}
+
+func Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(in, out *unversioned.TypeMeta, s conversion.Scope) error {
+ // These values are explicitly not copied
+ //out.APIVersion = in.APIVersion
+ //out.Kind = in.Kind
+ return nil
+}
+
+func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *unversioned.ListMeta, s conversion.Scope) error {
+ *out = *in
+ return nil
+}
+
+func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error {
+ *out = *in
+ return nil
+}
+
+func Convert_unversioned_Time_To_unversioned_Time(in *unversioned.Time, out *unversioned.Time, s conversion.Scope) error {
+ // Cannot deep copy these, because time.Time has unexported fields.
+ *out = *in
+ return nil
+}
+
+// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value into an unversioned.Time.
+func Convert_Slice_string_To_unversioned_Time(input *[]string, out *unversioned.Time, s conversion.Scope) error {
+ str := ""
+ if len(*input) > 0 {
+ str = (*input)[0]
+ }
+ return out.UnmarshalQueryParameter(str)
+}
+
+func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {
+ selector, err := labels.Parse(*in)
+ if err != nil {
+ return err
+ }
+ *out = selector
+ return nil
+}
+func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error {
+ selector, err := fields.ParseSelector(*in)
+ if err != nil {
+ return err
+ }
+ *out = selector
+ return nil
+}
+func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error {
+ if *in == nil {
+ return nil
+ }
+ *out = (*in).String()
+ return nil
+}
+func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error {
+ if *in == nil {
+ return nil
+ }
+ *out = (*in).String()
+ return nil
+}
+func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
+ *out = *in
+ return nil
+}
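
For orientation (illustrative only: these converters are normally invoked through the Scheme, and the nil conversion.Scope below is just for demonstration, since the function bodies above never use it), the pointer helpers behave as follows:

    func conversionExamples() (bool, string) {
    	var flag *bool
    	var plain bool
    	_ = Convert_Pointer_bool_To_bool(&flag, &plain, nil) // nil pointer -> false

    	name := "web"
    	var namePtr *string
    	_ = Convert_string_To_Pointer_string(&name, &namePtr, nil) // namePtr points at "web"

    	return plain, *namePtr // false, "web"
    }
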
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go
new file mode 100644
index 0000000..340f365
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go
@@ -0,0 +1,2950 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package api
+
+import (
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ fields "k8s.io/kubernetes/pkg/fields"
+ labels "k8s.io/kubernetes/pkg/labels"
+ runtime "k8s.io/kubernetes/pkg/runtime"
+ types "k8s.io/kubernetes/pkg/types"
+)
+
+func init() {
+ if err := Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_api_AWSElasticBlockStoreVolumeSource,
+ DeepCopy_api_Affinity,
+ DeepCopy_api_AttachedVolume,
+ DeepCopy_api_AzureFileVolumeSource,
+ DeepCopy_api_Binding,
+ DeepCopy_api_Capabilities,
+ DeepCopy_api_CephFSVolumeSource,
+ DeepCopy_api_CinderVolumeSource,
+ DeepCopy_api_ComponentCondition,
+ DeepCopy_api_ComponentStatus,
+ DeepCopy_api_ComponentStatusList,
+ DeepCopy_api_ConfigMap,
+ DeepCopy_api_ConfigMapKeySelector,
+ DeepCopy_api_ConfigMapList,
+ DeepCopy_api_ConfigMapVolumeSource,
+ DeepCopy_api_Container,
+ DeepCopy_api_ContainerImage,
+ DeepCopy_api_ContainerPort,
+ DeepCopy_api_ContainerState,
+ DeepCopy_api_ContainerStateRunning,
+ DeepCopy_api_ContainerStateTerminated,
+ DeepCopy_api_ContainerStateWaiting,
+ DeepCopy_api_ContainerStatus,
+ DeepCopy_api_ConversionError,
+ DeepCopy_api_DaemonEndpoint,
+ DeepCopy_api_DeleteOptions,
+ DeepCopy_api_DownwardAPIVolumeFile,
+ DeepCopy_api_DownwardAPIVolumeSource,
+ DeepCopy_api_EmptyDirVolumeSource,
+ DeepCopy_api_EndpointAddress,
+ DeepCopy_api_EndpointPort,
+ DeepCopy_api_EndpointSubset,
+ DeepCopy_api_Endpoints,
+ DeepCopy_api_EndpointsList,
+ DeepCopy_api_EnvVar,
+ DeepCopy_api_EnvVarSource,
+ DeepCopy_api_Event,
+ DeepCopy_api_EventList,
+ DeepCopy_api_EventSource,
+ DeepCopy_api_ExecAction,
+ DeepCopy_api_ExportOptions,
+ DeepCopy_api_FCVolumeSource,
+ DeepCopy_api_FlexVolumeSource,
+ DeepCopy_api_FlockerVolumeSource,
+ DeepCopy_api_GCEPersistentDiskVolumeSource,
+ DeepCopy_api_GitRepoVolumeSource,
+ DeepCopy_api_GlusterfsVolumeSource,
+ DeepCopy_api_HTTPGetAction,
+ DeepCopy_api_HTTPHeader,
+ DeepCopy_api_Handler,
+ DeepCopy_api_HostPathVolumeSource,
+ DeepCopy_api_ISCSIVolumeSource,
+ DeepCopy_api_KeyToPath,
+ DeepCopy_api_Lifecycle,
+ DeepCopy_api_LimitRange,
+ DeepCopy_api_LimitRangeItem,
+ DeepCopy_api_LimitRangeList,
+ DeepCopy_api_LimitRangeSpec,
+ DeepCopy_api_List,
+ DeepCopy_api_ListOptions,
+ DeepCopy_api_LoadBalancerIngress,
+ DeepCopy_api_LoadBalancerStatus,
+ DeepCopy_api_LocalObjectReference,
+ DeepCopy_api_NFSVolumeSource,
+ DeepCopy_api_Namespace,
+ DeepCopy_api_NamespaceList,
+ DeepCopy_api_NamespaceSpec,
+ DeepCopy_api_NamespaceStatus,
+ DeepCopy_api_Node,
+ DeepCopy_api_NodeAddress,
+ DeepCopy_api_NodeAffinity,
+ DeepCopy_api_NodeCondition,
+ DeepCopy_api_NodeDaemonEndpoints,
+ DeepCopy_api_NodeList,
+ DeepCopy_api_NodeProxyOptions,
+ DeepCopy_api_NodeResources,
+ DeepCopy_api_NodeSelector,
+ DeepCopy_api_NodeSelectorRequirement,
+ DeepCopy_api_NodeSelectorTerm,
+ DeepCopy_api_NodeSpec,
+ DeepCopy_api_NodeStatus,
+ DeepCopy_api_NodeSystemInfo,
+ DeepCopy_api_ObjectFieldSelector,
+ DeepCopy_api_ObjectMeta,
+ DeepCopy_api_ObjectReference,
+ DeepCopy_api_OwnerReference,
+ DeepCopy_api_PersistentVolume,
+ DeepCopy_api_PersistentVolumeClaim,
+ DeepCopy_api_PersistentVolumeClaimList,
+ DeepCopy_api_PersistentVolumeClaimSpec,
+ DeepCopy_api_PersistentVolumeClaimStatus,
+ DeepCopy_api_PersistentVolumeClaimVolumeSource,
+ DeepCopy_api_PersistentVolumeList,
+ DeepCopy_api_PersistentVolumeSource,
+ DeepCopy_api_PersistentVolumeSpec,
+ DeepCopy_api_PersistentVolumeStatus,
+ DeepCopy_api_Pod,
+ DeepCopy_api_PodAffinity,
+ DeepCopy_api_PodAffinityTerm,
+ DeepCopy_api_PodAntiAffinity,
+ DeepCopy_api_PodAttachOptions,
+ DeepCopy_api_PodCondition,
+ DeepCopy_api_PodExecOptions,
+ DeepCopy_api_PodList,
+ DeepCopy_api_PodLogOptions,
+ DeepCopy_api_PodProxyOptions,
+ DeepCopy_api_PodSecurityContext,
+ DeepCopy_api_PodSpec,
+ DeepCopy_api_PodStatus,
+ DeepCopy_api_PodStatusResult,
+ DeepCopy_api_PodTemplate,
+ DeepCopy_api_PodTemplateList,
+ DeepCopy_api_PodTemplateSpec,
+ DeepCopy_api_Preconditions,
+ DeepCopy_api_PreferredSchedulingTerm,
+ DeepCopy_api_Probe,
+ DeepCopy_api_RBDVolumeSource,
+ DeepCopy_api_RangeAllocation,
+ DeepCopy_api_ReplicationController,
+ DeepCopy_api_ReplicationControllerList,
+ DeepCopy_api_ReplicationControllerSpec,
+ DeepCopy_api_ReplicationControllerStatus,
+ DeepCopy_api_ResourceFieldSelector,
+ DeepCopy_api_ResourceQuota,
+ DeepCopy_api_ResourceQuotaList,
+ DeepCopy_api_ResourceQuotaSpec,
+ DeepCopy_api_ResourceQuotaStatus,
+ DeepCopy_api_ResourceRequirements,
+ DeepCopy_api_SELinuxOptions,
+ DeepCopy_api_Secret,
+ DeepCopy_api_SecretKeySelector,
+ DeepCopy_api_SecretList,
+ DeepCopy_api_SecretVolumeSource,
+ DeepCopy_api_SecurityContext,
+ DeepCopy_api_SerializedReference,
+ DeepCopy_api_Service,
+ DeepCopy_api_ServiceAccount,
+ DeepCopy_api_ServiceAccountList,
+ DeepCopy_api_ServiceList,
+ DeepCopy_api_ServicePort,
+ DeepCopy_api_ServiceProxyOptions,
+ DeepCopy_api_ServiceSpec,
+ DeepCopy_api_ServiceStatus,
+ DeepCopy_api_TCPSocketAction,
+ DeepCopy_api_Taint,
+ DeepCopy_api_Toleration,
+ DeepCopy_api_Volume,
+ DeepCopy_api_VolumeMount,
+ DeepCopy_api_VolumeSource,
+ DeepCopy_api_VsphereVirtualDiskVolumeSource,
+ DeepCopy_api_WeightedPodAffinityTerm,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
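
As the body above shows, this generated function copies the value-type fields one by one; a hypothetical direct call (the nil Cloner is acceptable here only because this particular function never uses it) looks like:

    func awsVolumeCopyExample() AWSElasticBlockStoreVolumeSource {
    	in := AWSElasticBlockStoreVolumeSource{VolumeID: "vol-123", FSType: "ext4", Partition: 1}
    	var out AWSElasticBlockStoreVolumeSource
    	_ = DeepCopy_api_AWSElasticBlockStoreVolumeSource(in, &out, nil) // field-by-field copy
    	return out // equal to in
    }
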
+
+func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) error {
+ if in.NodeAffinity != nil {
+ in, out := in.NodeAffinity, &out.NodeAffinity
+ *out = new(NodeAffinity)
+ if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.NodeAffinity = nil
+ }
+ if in.PodAffinity != nil {
+ in, out := in.PodAffinity, &out.PodAffinity
+ *out = new(PodAffinity)
+ if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PodAffinity = nil
+ }
+ if in.PodAntiAffinity != nil {
+ in, out := in.PodAntiAffinity, &out.PodAntiAffinity
+ *out = new(PodAntiAffinity)
+ if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PodAntiAffinity = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_AttachedVolume(in AttachedVolume, out *AttachedVolume, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.DevicePath = in.DevicePath
+ return nil
+}
+
+func DeepCopy_api_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error {
+ out.SecretName = in.SecretName
+ out.ShareName = in.ShareName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_Binding(in Binding, out *Binding, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Target = in.Target
+ return nil
+}
+
+func DeepCopy_api_Capabilities(in Capabilities, out *Capabilities, c *conversion.Cloner) error {
+ if in.Add != nil {
+ in, out := in.Add, &out.Add
+ *out = make([]Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Add = nil
+ }
+ if in.Drop != nil {
+ in, out := in.Drop, &out.Drop
+ *out = make([]Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Drop = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error {
+ if in.Monitors != nil {
+ in, out := in.Monitors, &out.Monitors
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Monitors = nil
+ }
+ out.Path = in.Path
+ out.User = in.User
+ out.SecretFile = in.SecretFile
+ if in.SecretRef != nil {
+ in, out := in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = *in
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.Message = in.Message
+ out.Error = in.Error
+ return nil
+}
+
+func DeepCopy_api_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]ComponentCondition, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Conditions = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ComponentStatusList(in ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ComponentStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ComponentStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Data = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKeySelector, c *conversion.Cloner) error {
+ out.LocalObjectReference = in.LocalObjectReference
+ out.Key = in.Key
+ return nil
+}
+
+func DeepCopy_api_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ConfigMap, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ConfigMap(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapVolumeSource, c *conversion.Cloner) error {
+ out.LocalObjectReference = in.LocalObjectReference
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]KeyToPath, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_Container(in Container, out *Container, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Image = in.Image
+ if in.Command != nil {
+ in, out := in.Command, &out.Command
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Command = nil
+ }
+ if in.Args != nil {
+ in, out := in.Args, &out.Args
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Args = nil
+ }
+ out.WorkingDir = in.WorkingDir
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]ContainerPort, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.Env != nil {
+ in, out := in.Env, &out.Env
+ *out = make([]EnvVar, len(in))
+ for i := range in {
+ if err := DeepCopy_api_EnvVar(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Env = nil
+ }
+ if err := DeepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
+ return err
+ }
+ if in.VolumeMounts != nil {
+ in, out := in.VolumeMounts, &out.VolumeMounts
+ *out = make([]VolumeMount, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.VolumeMounts = nil
+ }
+ if in.LivenessProbe != nil {
+ in, out := in.LivenessProbe, &out.LivenessProbe
+ *out = new(Probe)
+ if err := DeepCopy_api_Probe(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.LivenessProbe = nil
+ }
+ if in.ReadinessProbe != nil {
+ in, out := in.ReadinessProbe, &out.ReadinessProbe
+ *out = new(Probe)
+ if err := DeepCopy_api_Probe(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ReadinessProbe = nil
+ }
+ if in.Lifecycle != nil {
+ in, out := in.Lifecycle, &out.Lifecycle
+ *out = new(Lifecycle)
+ if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Lifecycle = nil
+ }
+ out.TerminationMessagePath = in.TerminationMessagePath
+ out.ImagePullPolicy = in.ImagePullPolicy
+ if in.SecurityContext != nil {
+ in, out := in.SecurityContext, &out.SecurityContext
+ *out = new(SecurityContext)
+ if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ out.Stdin = in.Stdin
+ out.StdinOnce = in.StdinOnce
+ out.TTY = in.TTY
+ return nil
+}
+
+func DeepCopy_api_ContainerImage(in ContainerImage, out *ContainerImage, c *conversion.Cloner) error {
+ if in.Names != nil {
+ in, out := in.Names, &out.Names
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Names = nil
+ }
+ out.SizeBytes = in.SizeBytes
+ return nil
+}
+
+func DeepCopy_api_ContainerPort(in ContainerPort, out *ContainerPort, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.HostPort = in.HostPort
+ out.ContainerPort = in.ContainerPort
+ out.Protocol = in.Protocol
+ out.HostIP = in.HostIP
+ return nil
+}
+
+func DeepCopy_api_ContainerState(in ContainerState, out *ContainerState, c *conversion.Cloner) error {
+ if in.Waiting != nil {
+ in, out := in.Waiting, &out.Waiting
+ *out = new(ContainerStateWaiting)
+ **out = *in
+ } else {
+ out.Waiting = nil
+ }
+ if in.Running != nil {
+ in, out := in.Running, &out.Running
+ *out = new(ContainerStateRunning)
+ if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Running = nil
+ }
+ if in.Terminated != nil {
+ in, out := in.Terminated, &out.Terminated
+ *out = new(ContainerStateTerminated)
+ if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Terminated = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error {
+ out.StartedAt = in.StartedAt.DeepCopy()
+ return nil
+}
+
+func DeepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *ContainerStateTerminated, c *conversion.Cloner) error {
+ out.ExitCode = in.ExitCode
+ out.Signal = in.Signal
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.StartedAt = in.StartedAt.DeepCopy()
+ out.FinishedAt = in.FinishedAt.DeepCopy()
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func DeepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_api_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *conversion.Cloner) error {
+ out.Name = in.Name
+ if err := DeepCopy_api_ContainerState(in.State, &out.State, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_ContainerState(in.LastTerminationState, &out.LastTerminationState, c); err != nil {
+ return err
+ }
+ out.Ready = in.Ready
+ out.RestartCount = in.RestartCount
+ out.Image = in.Image
+ out.ImageID = in.ImageID
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func DeepCopy_api_ConversionError(in ConversionError, out *ConversionError, c *conversion.Cloner) error {
+ if in.In == nil {
+ out.In = nil
+ } else if newVal, err := c.DeepCopy(in.In); err != nil {
+ return err
+ } else {
+ out.In = newVal.(interface{})
+ }
+ if in.Out == nil {
+ out.Out = nil
+ } else if newVal, err := c.DeepCopy(in.Out); err != nil {
+ return err
+ } else {
+ out.Out = newVal.(interface{})
+ }
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_api_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conversion.Cloner) error {
+ out.Port = in.Port
+ return nil
+}
+
+func DeepCopy_api_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if in.GracePeriodSeconds != nil {
+ in, out := in.GracePeriodSeconds, &out.GracePeriodSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.GracePeriodSeconds = nil
+ }
+ if in.Preconditions != nil {
+ in, out := in.Preconditions, &out.Preconditions
+ *out = new(Preconditions)
+ if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Preconditions = nil
+ }
+ if in.OrphanDependents != nil {
+ in, out := in.OrphanDependents, &out.OrphanDependents
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.OrphanDependents = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error {
+ out.Path = in.Path
+ if in.FieldRef != nil {
+ in, out := in.FieldRef, &out.FieldRef
+ *out = new(ObjectFieldSelector)
+ **out = *in
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(ResourceFieldSelector)
+ if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, c *conversion.Cloner) error {
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]DownwardAPIVolumeFile, len(in))
+ for i := range in {
+ if err := DeepCopy_api_DownwardAPIVolumeFile(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVolumeSource, c *conversion.Cloner) error {
+ out.Medium = in.Medium
+ return nil
+}
+
+func DeepCopy_api_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ if in.TargetRef != nil {
+ in, out := in.TargetRef, &out.TargetRef
+ *out = new(ObjectReference)
+ **out = *in
+ } else {
+ out.TargetRef = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_EndpointPort(in EndpointPort, out *EndpointPort, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Port = in.Port
+ out.Protocol = in.Protocol
+ return nil
+}
+
+func DeepCopy_api_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conversion.Cloner) error {
+ if in.Addresses != nil {
+ in, out := in.Addresses, &out.Addresses
+ *out = make([]EndpointAddress, len(in))
+ for i := range in {
+ if err := DeepCopy_api_EndpointAddress(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Addresses = nil
+ }
+ if in.NotReadyAddresses != nil {
+ in, out := in.NotReadyAddresses, &out.NotReadyAddresses
+ *out = make([]EndpointAddress, len(in))
+ for i := range in {
+ if err := DeepCopy_api_EndpointAddress(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NotReadyAddresses = nil
+ }
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]EndpointPort, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ports = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Subsets != nil {
+ in, out := in.Subsets, &out.Subsets
+ *out = make([]EndpointSubset, len(in))
+ for i := range in {
+ if err := DeepCopy_api_EndpointSubset(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subsets = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Endpoints, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Endpoints(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ if in.ValueFrom != nil {
+ in, out := in.ValueFrom, &out.ValueFrom
+ *out = new(EnvVarSource)
+ if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ValueFrom = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion.Cloner) error {
+ if in.FieldRef != nil {
+ in, out := in.FieldRef, &out.FieldRef
+ *out = new(ObjectFieldSelector)
+ **out = *in
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(ResourceFieldSelector)
+ if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ if in.ConfigMapKeyRef != nil {
+ in, out := in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+ *out = new(ConfigMapKeySelector)
+ **out = *in
+ } else {
+ out.ConfigMapKeyRef = nil
+ }
+ if in.SecretKeyRef != nil {
+ in, out := in.SecretKeyRef, &out.SecretKeyRef
+ *out = new(SecretKeySelector)
+ **out = *in
+ } else {
+ out.SecretKeyRef = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.InvolvedObject = in.InvolvedObject
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.Source = in.Source
+ out.FirstTimestamp = in.FirstTimestamp.DeepCopy()
+ out.LastTimestamp = in.LastTimestamp.DeepCopy()
+ out.Count = in.Count
+ out.Type = in.Type
+ return nil
+}
+
+func DeepCopy_api_EventList(in EventList, out *EventList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Event, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Event(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_EventSource(in EventSource, out *EventSource, c *conversion.Cloner) error {
+ out.Component = in.Component
+ out.Host = in.Host
+ return nil
+}
+
+func DeepCopy_api_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner) error {
+ if in.Command != nil {
+ in, out := in.Command, &out.Command
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Command = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func DeepCopy_api_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error {
+ if in.TargetWWNs != nil {
+ in, out := in.TargetWWNs, &out.TargetWWNs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.TargetWWNs = nil
+ }
+ if in.Lun != nil {
+ in, out := in.Lun, &out.Lun
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Lun = nil
+ }
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c *conversion.Cloner) error {
+ out.Driver = in.Driver
+ out.FSType = in.FSType
+ if in.SecretRef != nil {
+ in, out := in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = *in
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ if in.Options != nil {
+ in, out := in.Options, &out.Options
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Options = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_FlockerVolumeSource(in FlockerVolumeSource, out *FlockerVolumeSource, c *conversion.Cloner) error {
+ out.DatasetName = in.DatasetName
+ return nil
+}
+
+func DeepCopy_api_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
+ out.PDName = in.PDName
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_GitRepoVolumeSource(in GitRepoVolumeSource, out *GitRepoVolumeSource, c *conversion.Cloner) error {
+ out.Repository = in.Repository
+ out.Revision = in.Revision
+ out.Directory = in.Directory
+ return nil
+}
+
+func DeepCopy_api_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *GlusterfsVolumeSource, c *conversion.Cloner) error {
+ out.EndpointsName = in.EndpointsName
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error {
+ out.Path = in.Path
+ out.Port = in.Port
+ out.Host = in.Host
+ out.Scheme = in.Scheme
+ if in.HTTPHeaders != nil {
+ in, out := in.HTTPHeaders, &out.HTTPHeaders
+ *out = make([]HTTPHeader, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.HTTPHeaders = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_HTTPHeader(in HTTPHeader, out *HTTPHeader, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ return nil
+}
+
+func DeepCopy_api_Handler(in Handler, out *Handler, c *conversion.Cloner) error {
+ if in.Exec != nil {
+ in, out := in.Exec, &out.Exec
+ *out = new(ExecAction)
+ if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Exec = nil
+ }
+ if in.HTTPGet != nil {
+ in, out := in.HTTPGet, &out.HTTPGet
+ *out = new(HTTPGetAction)
+ if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.HTTPGet = nil
+ }
+ if in.TCPSocket != nil {
+ in, out := in.TCPSocket, &out.TCPSocket
+ *out = new(TCPSocketAction)
+ **out = *in
+ } else {
+ out.TCPSocket = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_HostPathVolumeSource(in HostPathVolumeSource, out *HostPathVolumeSource, c *conversion.Cloner) error {
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_api_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, c *conversion.Cloner) error {
+ out.TargetPortal = in.TargetPortal
+ out.IQN = in.IQN
+ out.Lun = in.Lun
+ out.ISCSIInterface = in.ISCSIInterface
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_KeyToPath(in KeyToPath, out *KeyToPath, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_api_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) error {
+ if in.PostStart != nil {
+ in, out := in.PostStart, &out.PostStart
+ *out = new(Handler)
+ if err := DeepCopy_api_Handler(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PostStart = nil
+ }
+ if in.PreStop != nil {
+ in, out := in.PreStop, &out.PreStop
+ *out = new(Handler)
+ if err := DeepCopy_api_Handler(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PreStop = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_LimitRangeSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error {
+ out.Type = in.Type
+ if in.Max != nil {
+ in, out := in.Max, &out.Max
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Max = nil
+ }
+ if in.Min != nil {
+ in, out := in.Min, &out.Min
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Min = nil
+ }
+ if in.Default != nil {
+ in, out := in.Default, &out.Default
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Default = nil
+ }
+ if in.DefaultRequest != nil {
+ in, out := in.DefaultRequest, &out.DefaultRequest
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.DefaultRequest = nil
+ }
+ if in.MaxLimitRequestRatio != nil {
+ in, out := in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.MaxLimitRequestRatio = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]LimitRange, len(in))
+ for i := range in {
+ if err := DeepCopy_api_LimitRange(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conversion.Cloner) error {
+ if in.Limits != nil {
+ in, out := in.Limits, &out.Limits
+ *out = make([]LimitRangeItem, len(in))
+ for i := range in {
+ if err := DeepCopy_api_LimitRangeItem(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Limits = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_List(in List, out *List, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]runtime.Object, len(in))
+ for i := range in {
+ if newVal, err := c.DeepCopy(in[i]); err != nil {
+ return err
+ } else {
+ (*out)[i] = newVal.(runtime.Object)
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if in.LabelSelector == nil {
+ out.LabelSelector = nil
+ } else if newVal, err := c.DeepCopy(in.LabelSelector); err != nil {
+ return err
+ } else {
+ out.LabelSelector = newVal.(labels.Selector)
+ }
+ if in.FieldSelector == nil {
+ out.FieldSelector = nil
+ } else if newVal, err := c.DeepCopy(in.FieldSelector); err != nil {
+ return err
+ } else {
+ out.FieldSelector = newVal.(fields.Selector)
+ }
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ if in.TimeoutSeconds != nil {
+ in, out := in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TimeoutSeconds = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_LoadBalancerIngress(in LoadBalancerIngress, out *LoadBalancerIngress, c *conversion.Cloner) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ return nil
+}
+
+func DeepCopy_api_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStatus, c *conversion.Cloner) error {
+ if in.Ingress != nil {
+ in, out := in.Ingress, &out.Ingress
+ *out = make([]LoadBalancerIngress, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_LocalObjectReference(in LocalObjectReference, out *LocalObjectReference, c *conversion.Cloner) error {
+ out.Name = in.Name
+ return nil
+}
+
+func DeepCopy_api_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *conversion.Cloner) error {
+ out.Server = in.Server
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_NamespaceSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_api_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Namespace, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Namespace(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversion.Cloner) error {
+ if in.Finalizers != nil {
+ in, out := in.Finalizers, &out.Finalizers
+ *out = make([]FinalizerName, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Finalizers = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ return nil
+}
+
+func DeepCopy_api_Node(in Node, out *Node, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Spec = in.Spec
+ if err := DeepCopy_api_NodeStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Address = in.Address
+ return nil
+}
+
+func DeepCopy_api_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion.Cloner) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = new(NodeSelector)
+ if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PreferredSchedulingTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PreferredSchedulingTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_api_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEndpoints, c *conversion.Cloner) error {
+ out.KubeletEndpoint = in.KubeletEndpoint
+ return nil
+}
+
+func DeepCopy_api_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Node, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Node(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_api_NodeResources(in NodeResources, out *NodeResources, c *conversion.Cloner) error {
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeSelector(in NodeSelector, out *NodeSelector, c *conversion.Cloner) error {
+ if in.NodeSelectorTerms != nil {
+ in, out := in.NodeSelectorTerms, &out.NodeSelectorTerms
+ *out = make([]NodeSelectorTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_api_NodeSelectorTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NodeSelectorTerms = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeSelectorRequirement(in NodeSelectorRequirement, out *NodeSelectorRequirement, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ if in.Values != nil {
+ in, out := in.Values, &out.Values
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Values = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeSelectorTerm(in NodeSelectorTerm, out *NodeSelectorTerm, c *conversion.Cloner) error {
+ if in.MatchExpressions != nil {
+ in, out := in.MatchExpressions, &out.MatchExpressions
+ *out = make([]NodeSelectorRequirement, len(in))
+ for i := range in {
+ if err := DeepCopy_api_NodeSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error {
+ out.PodCIDR = in.PodCIDR
+ out.ExternalID = in.ExternalID
+ out.ProviderID = in.ProviderID
+ out.Unschedulable = in.Unschedulable
+ return nil
+}
+
+func DeepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error {
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ if in.Allocatable != nil {
+ in, out := in.Allocatable, &out.Allocatable
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Allocatable = nil
+ }
+ out.Phase = in.Phase
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]NodeCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_api_NodeCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.Addresses != nil {
+ in, out := in.Addresses, &out.Addresses
+ *out = make([]NodeAddress, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Addresses = nil
+ }
+ out.DaemonEndpoints = in.DaemonEndpoints
+ out.NodeInfo = in.NodeInfo
+ if in.Images != nil {
+ in, out := in.Images, &out.Images
+ *out = make([]ContainerImage, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ContainerImage(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Images = nil
+ }
+ if in.VolumesInUse != nil {
+ in, out := in.VolumesInUse, &out.VolumesInUse
+ *out = make([]UniqueVolumeName, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.VolumesInUse = nil
+ }
+ if in.VolumesAttached != nil {
+ in, out := in.VolumesAttached, &out.VolumesAttached
+ *out = make([]AttachedVolume, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.VolumesAttached = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conversion.Cloner) error {
+ out.MachineID = in.MachineID
+ out.SystemUUID = in.SystemUUID
+ out.BootID = in.BootID
+ out.KernelVersion = in.KernelVersion
+ out.OSImage = in.OSImage
+ out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
+ out.KubeletVersion = in.KubeletVersion
+ out.KubeProxyVersion = in.KubeProxyVersion
+ out.OperatingSystem = in.OperatingSystem
+ out.Architecture = in.Architecture
+ return nil
+}
+
+func DeepCopy_api_ObjectFieldSelector(in ObjectFieldSelector, out *ObjectFieldSelector, c *conversion.Cloner) error {
+ out.APIVersion = in.APIVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func DeepCopy_api_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.GenerateName = in.GenerateName
+ out.Namespace = in.Namespace
+ out.SelfLink = in.SelfLink
+ out.UID = in.UID
+ out.ResourceVersion = in.ResourceVersion
+ out.Generation = in.Generation
+ out.CreationTimestamp = in.CreationTimestamp.DeepCopy()
+ if in.DeletionTimestamp != nil {
+ in, out := in.DeletionTimestamp, &out.DeletionTimestamp
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.DeletionTimestamp = nil
+ }
+ if in.DeletionGracePeriodSeconds != nil {
+ in, out := in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.DeletionGracePeriodSeconds = nil
+ }
+ if in.Labels != nil {
+ in, out := in.Labels, &out.Labels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Labels = nil
+ }
+ if in.Annotations != nil {
+ in, out := in.Annotations, &out.Annotations
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Annotations = nil
+ }
+ if in.OwnerReferences != nil {
+ in, out := in.OwnerReferences, &out.OwnerReferences
+ *out = make([]OwnerReference, len(in))
+ for i := range in {
+ if err := DeepCopy_api_OwnerReference(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.OwnerReferences = nil
+ }
+ if in.Finalizers != nil {
+ in, out := in.Finalizers, &out.Finalizers
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Finalizers = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ObjectReference(in ObjectReference, out *ObjectReference, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ out.UID = in.UID
+ out.APIVersion = in.APIVersion
+ out.ResourceVersion = in.ResourceVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func DeepCopy_api_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error {
+ out.APIVersion = in.APIVersion
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.UID = in.UID
+ if in.Controller != nil {
+ in, out := in.Controller, &out.Controller
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.Controller = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PersistentVolumeSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PersistentVolumeClaimSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PersistentVolumeClaimStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PersistentVolumeClaim, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeClaimSpec(in PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, c *conversion.Cloner) error {
+ if in.AccessModes != nil {
+ in, out := in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := DeepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
+ return err
+ }
+ out.VolumeName = in.VolumeName
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ if in.AccessModes != nil {
+ in, out := in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error {
+ out.ClaimName = in.ClaimName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PersistentVolume, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PersistentVolume(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *PersistentVolumeSource, c *conversion.Cloner) error {
+ if in.GCEPersistentDisk != nil {
+ in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(GCEPersistentDiskVolumeSource)
+ **out = *in
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ **out = *in
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.HostPath != nil {
+ in, out := in.HostPath, &out.HostPath
+ *out = new(HostPathVolumeSource)
+ **out = *in
+ } else {
+ out.HostPath = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := in.Glusterfs, &out.Glusterfs
+ *out = new(GlusterfsVolumeSource)
+ **out = *in
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.NFS != nil {
+ in, out := in.NFS, &out.NFS
+ *out = new(NFSVolumeSource)
+ **out = *in
+ } else {
+ out.NFS = nil
+ }
+ if in.RBD != nil {
+ in, out := in.RBD, &out.RBD
+ *out = new(RBDVolumeSource)
+ if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.ISCSI != nil {
+ in, out := in.ISCSI, &out.ISCSI
+ *out = new(ISCSIVolumeSource)
+ **out = *in
+ } else {
+ out.ISCSI = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := in.FlexVolume, &out.FlexVolume
+ *out = new(FlexVolumeSource)
+ if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.Cinder != nil {
+ in, out := in.Cinder, &out.Cinder
+ *out = new(CinderVolumeSource)
+ **out = *in
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := in.CephFS, &out.CephFS
+ *out = new(CephFSVolumeSource)
+ if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.FC != nil {
+ in, out := in.FC, &out.FC
+ *out = new(FCVolumeSource)
+ if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.Flocker != nil {
+ in, out := in.Flocker, &out.Flocker
+ *out = new(FlockerVolumeSource)
+ **out = *in
+ } else {
+ out.Flocker = nil
+ }
+ if in.AzureFile != nil {
+ in, out := in.AzureFile, &out.AzureFile
+ *out = new(AzureFileVolumeSource)
+ **out = *in
+ } else {
+ out.AzureFile = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := in.VsphereVolume, &out.VsphereVolume
+ *out = new(VsphereVirtualDiskVolumeSource)
+ **out = *in
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error {
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ if err := DeepCopy_api_PersistentVolumeSource(in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil {
+ return err
+ }
+ if in.AccessModes != nil {
+ in, out := in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.ClaimRef != nil {
+ in, out := in.ClaimRef, &out.ClaimRef
+ *out = new(ObjectReference)
+ **out = *in
+ } else {
+ out.ClaimRef = nil
+ }
+ out.PersistentVolumeReclaimPolicy = in.PersistentVolumeReclaimPolicy
+ return nil
+}
+
+func DeepCopy_api_PersistentVolumeStatus(in PersistentVolumeStatus, out *PersistentVolumeStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ out.Message = in.Message
+ out.Reason = in.Reason
+ return nil
+}
+
+func DeepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PodSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PodStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]WeightedPodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error {
+ if in.LabelSelector != nil {
+ in, out := in.LabelSelector, &out.LabelSelector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.LabelSelector = nil
+ }
+ if in.Namespaces != nil {
+ in, out := in.Namespaces, &out.Namespaces
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Namespaces = nil
+ }
+ out.TopologyKey = in.TopologyKey
+ return nil
+}
+
+func DeepCopy_api_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]WeightedPodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ return nil
+}
+
+func DeepCopy_api_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastProbeTime = in.LastProbeTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_api_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ if in.Command != nil {
+ in, out := in.Command, &out.Command
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Command = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodList(in PodList, out *PodList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Pod, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Pod(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Container = in.Container
+ out.Follow = in.Follow
+ out.Previous = in.Previous
+ if in.SinceSeconds != nil {
+ in, out := in.SinceSeconds, &out.SinceSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.SinceSeconds = nil
+ }
+ if in.SinceTime != nil {
+ in, out := in.SinceTime, &out.SinceTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.SinceTime = nil
+ }
+ out.Timestamps = in.Timestamps
+ if in.TailLines != nil {
+ in, out := in.TailLines, &out.TailLines
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TailLines = nil
+ }
+ if in.LimitBytes != nil {
+ in, out := in.LimitBytes, &out.LimitBytes
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.LimitBytes = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_api_PodSecurityContext(in PodSecurityContext, out *PodSecurityContext, c *conversion.Cloner) error {
+ out.HostNetwork = in.HostNetwork
+ out.HostPID = in.HostPID
+ out.HostIPC = in.HostIPC
+ if in.SELinuxOptions != nil {
+ in, out := in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(SELinuxOptions)
+ **out = *in
+ } else {
+ out.SELinuxOptions = nil
+ }
+ if in.RunAsUser != nil {
+ in, out := in.RunAsUser, &out.RunAsUser
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.RunAsUser = nil
+ }
+ if in.RunAsNonRoot != nil {
+ in, out := in.RunAsNonRoot, &out.RunAsNonRoot
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.RunAsNonRoot = nil
+ }
+ if in.SupplementalGroups != nil {
+ in, out := in.SupplementalGroups, &out.SupplementalGroups
+ *out = make([]int64, len(in))
+ copy(*out, in)
+ } else {
+ out.SupplementalGroups = nil
+ }
+ if in.FSGroup != nil {
+ in, out := in.FSGroup, &out.FSGroup
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.FSGroup = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error {
+ if in.Volumes != nil {
+ in, out := in.Volumes, &out.Volumes
+ *out = make([]Volume, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Volume(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Volumes = nil
+ }
+ if in.InitContainers != nil {
+ in, out := in.InitContainers, &out.InitContainers
+ *out = make([]Container, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainers = nil
+ }
+ if in.Containers != nil {
+ in, out := in.Containers, &out.Containers
+ *out = make([]Container, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Containers = nil
+ }
+ out.RestartPolicy = in.RestartPolicy
+ if in.TerminationGracePeriodSeconds != nil {
+ in, out := in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TerminationGracePeriodSeconds = nil
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ActiveDeadlineSeconds = nil
+ }
+ out.DNSPolicy = in.DNSPolicy
+ if in.NodeSelector != nil {
+ in, out := in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.NodeSelector = nil
+ }
+ out.ServiceAccountName = in.ServiceAccountName
+ out.NodeName = in.NodeName
+ if in.SecurityContext != nil {
+ in, out := in.SecurityContext, &out.SecurityContext
+ *out = new(PodSecurityContext)
+ if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]LocalObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ out.Hostname = in.Hostname
+ out.Subdomain = in.Subdomain
+ return nil
+}
+
+func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]PodCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PodCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.Message = in.Message
+ out.Reason = in.Reason
+ out.HostIP = in.HostIP
+ out.PodIP = in.PodIP
+ if in.StartTime != nil {
+ in, out := in.StartTime, &out.StartTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.StartTime = nil
+ }
+ if in.InitContainerStatuses != nil {
+ in, out := in.InitContainerStatuses, &out.InitContainerStatuses
+ *out = make([]ContainerStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainerStatuses = nil
+ }
+ if in.ContainerStatuses != nil {
+ in, out := in.ContainerStatuses, &out.ContainerStatuses
+ *out = make([]ContainerStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ContainerStatuses = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PodStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PodTemplate, len(in))
+ for i := range in {
+ if err := DeepCopy_api_PodTemplate(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *conversion.Cloner) error {
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_PodSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_Preconditions(in Preconditions, out *Preconditions, c *conversion.Cloner) error {
+ if in.UID != nil {
+ in, out := in.UID, &out.UID
+ *out = new(types.UID)
+ **out = *in
+ } else {
+ out.UID = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error {
+ out.Weight = in.Weight
+ if err := DeepCopy_api_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_Probe(in Probe, out *Probe, c *conversion.Cloner) error {
+ if err := DeepCopy_api_Handler(in.Handler, &out.Handler, c); err != nil {
+ return err
+ }
+ out.InitialDelaySeconds = in.InitialDelaySeconds
+ out.TimeoutSeconds = in.TimeoutSeconds
+ out.PeriodSeconds = in.PeriodSeconds
+ out.SuccessThreshold = in.SuccessThreshold
+ out.FailureThreshold = in.FailureThreshold
+ return nil
+}
+
+func DeepCopy_api_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *conversion.Cloner) error {
+ if in.CephMonitors != nil {
+ in, out := in.CephMonitors, &out.CephMonitors
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.CephMonitors = nil
+ }
+ out.RBDImage = in.RBDImage
+ out.FSType = in.FSType
+ out.RBDPool = in.RBDPool
+ out.RadosUser = in.RadosUser
+ out.Keyring = in.Keyring
+ if in.SecretRef != nil {
+ in, out := in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = *in
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_api_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Range = in.Range
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Data = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_ReplicationControllerSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_api_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ReplicationController, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ReplicationController(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ReplicationControllerSpec(in ReplicationControllerSpec, out *ReplicationControllerSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.Template != nil {
+ in, out := in.Template, &out.Template
+ *out = new(PodTemplateSpec)
+ if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Template = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func DeepCopy_api_ResourceFieldSelector(in ResourceFieldSelector, out *ResourceFieldSelector, c *conversion.Cloner) error {
+ out.ContainerName = in.ContainerName
+ out.Resource = in.Resource
+ out.Divisor = in.Divisor.DeepCopy()
+ return nil
+}
+
+func DeepCopy_api_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_ResourceQuotaSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_ResourceQuotaStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ResourceQuota, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ResourceQuota(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error {
+ if in.Hard != nil {
+ in, out := in.Hard, &out.Hard
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Hard = nil
+ }
+ if in.Scopes != nil {
+ in, out := in.Scopes, &out.Scopes
+ *out = make([]ResourceQuotaScope, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Scopes = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error {
+ if in.Hard != nil {
+ in, out := in.Hard, &out.Hard
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Hard = nil
+ }
+ if in.Used != nil {
+ in, out := in.Used, &out.Used
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Used = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error {
+ if in.Limits != nil {
+ in, out := in.Limits, &out.Limits
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Limits = nil
+ }
+ if in.Requests != nil {
+ in, out := in.Requests, &out.Requests
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Requests = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conversion.Cloner) error {
+ out.User = in.User
+ out.Role = in.Role
+ out.Type = in.Type
+ out.Level = in.Level
+ return nil
+}
+
+func DeepCopy_api_Secret(in Secret, out *Secret, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make(map[string][]byte)
+ for key, val := range in {
+ if newVal, err := c.DeepCopy(val); err != nil {
+ return err
+ } else {
+ (*out)[key] = newVal.([]byte)
+ }
+ }
+ } else {
+ out.Data = nil
+ }
+ out.Type = in.Type
+ return nil
+}
+
+func DeepCopy_api_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector, c *conversion.Cloner) error {
+ out.LocalObjectReference = in.LocalObjectReference
+ out.Key = in.Key
+ return nil
+}
+
+func DeepCopy_api_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Secret, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Secret(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error {
+ out.SecretName = in.SecretName
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]KeyToPath, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_SecurityContext(in SecurityContext, out *SecurityContext, c *conversion.Cloner) error {
+ if in.Capabilities != nil {
+ in, out := in.Capabilities, &out.Capabilities
+ *out = new(Capabilities)
+ if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Capabilities = nil
+ }
+ if in.Privileged != nil {
+ in, out := in.Privileged, &out.Privileged
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.Privileged = nil
+ }
+ if in.SELinuxOptions != nil {
+ in, out := in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(SELinuxOptions)
+ **out = *in
+ } else {
+ out.SELinuxOptions = nil
+ }
+ if in.RunAsUser != nil {
+ in, out := in.RunAsUser, &out.RunAsUser
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.RunAsUser = nil
+ }
+ if in.RunAsNonRoot != nil {
+ in, out := in.RunAsNonRoot, &out.RunAsNonRoot
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.RunAsNonRoot = nil
+ }
+ if in.ReadOnlyRootFilesystem != nil {
+ in, out := in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.ReadOnlyRootFilesystem = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Reference = in.Reference
+ return nil
+}
+
+func DeepCopy_api_Service(in Service, out *Service, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_ServiceSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_api_ServiceStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Secrets != nil {
+ in, out := in.Secrets, &out.Secrets
+ *out = make([]ObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Secrets = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]LocalObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ServiceAccount, len(in))
+ for i := range in {
+ if err := DeepCopy_api_ServiceAccount(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Service, len(in))
+ for i := range in {
+ if err := DeepCopy_api_Service(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Protocol = in.Protocol
+ out.Port = in.Port
+ out.TargetPort = in.TargetPort
+ out.NodePort = in.NodePort
+ return nil
+}
+
+func DeepCopy_api_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error {
+ out.Type = in.Type
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]ServicePort, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.ClusterIP = in.ClusterIP
+ if in.ExternalIPs != nil {
+ in, out := in.ExternalIPs, &out.ExternalIPs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.ExternalIPs = nil
+ }
+ out.LoadBalancerIP = in.LoadBalancerIP
+ out.SessionAffinity = in.SessionAffinity
+ if in.LoadBalancerSourceRanges != nil {
+ in, out := in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.LoadBalancerSourceRanges = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_ServiceStatus(in ServiceStatus, out *ServiceStatus, c *conversion.Cloner) error {
+ if err := DeepCopy_api_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error {
+ out.Port = in.Port
+ return nil
+}
+
+func DeepCopy_api_Taint(in Taint, out *Taint, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Value = in.Value
+ out.Effect = in.Effect
+ return nil
+}
+
+func DeepCopy_api_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ out.Value = in.Value
+ out.Effect = in.Effect
+ return nil
+}
+
+func DeepCopy_api_Volume(in Volume, out *Volume, c *conversion.Cloner) error {
+ out.Name = in.Name
+ if err := DeepCopy_api_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_api_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.ReadOnly = in.ReadOnly
+ out.MountPath = in.MountPath
+ out.SubPath = in.SubPath
+ return nil
+}
+
+func DeepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.Cloner) error {
+ if in.HostPath != nil {
+ in, out := in.HostPath, &out.HostPath
+ *out = new(HostPathVolumeSource)
+ **out = *in
+ } else {
+ out.HostPath = nil
+ }
+ if in.EmptyDir != nil {
+ in, out := in.EmptyDir, &out.EmptyDir
+ *out = new(EmptyDirVolumeSource)
+ **out = *in
+ } else {
+ out.EmptyDir = nil
+ }
+ if in.GCEPersistentDisk != nil {
+ in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(GCEPersistentDiskVolumeSource)
+ **out = *in
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ **out = *in
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.GitRepo != nil {
+ in, out := in.GitRepo, &out.GitRepo
+ *out = new(GitRepoVolumeSource)
+ **out = *in
+ } else {
+ out.GitRepo = nil
+ }
+ if in.Secret != nil {
+ in, out := in.Secret, &out.Secret
+ *out = new(SecretVolumeSource)
+ if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Secret = nil
+ }
+ if in.NFS != nil {
+ in, out := in.NFS, &out.NFS
+ *out = new(NFSVolumeSource)
+ **out = *in
+ } else {
+ out.NFS = nil
+ }
+ if in.ISCSI != nil {
+ in, out := in.ISCSI, &out.ISCSI
+ *out = new(ISCSIVolumeSource)
+ **out = *in
+ } else {
+ out.ISCSI = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := in.Glusterfs, &out.Glusterfs
+ *out = new(GlusterfsVolumeSource)
+ **out = *in
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.PersistentVolumeClaim != nil {
+ in, out := in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+ *out = new(PersistentVolumeClaimVolumeSource)
+ **out = *in
+ } else {
+ out.PersistentVolumeClaim = nil
+ }
+ if in.RBD != nil {
+ in, out := in.RBD, &out.RBD
+ *out = new(RBDVolumeSource)
+ if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := in.FlexVolume, &out.FlexVolume
+ *out = new(FlexVolumeSource)
+ if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.Cinder != nil {
+ in, out := in.Cinder, &out.Cinder
+ *out = new(CinderVolumeSource)
+ **out = *in
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := in.CephFS, &out.CephFS
+ *out = new(CephFSVolumeSource)
+ if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.Flocker != nil {
+ in, out := in.Flocker, &out.Flocker
+ *out = new(FlockerVolumeSource)
+ **out = *in
+ } else {
+ out.Flocker = nil
+ }
+ if in.DownwardAPI != nil {
+ in, out := in.DownwardAPI, &out.DownwardAPI
+ *out = new(DownwardAPIVolumeSource)
+ if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.DownwardAPI = nil
+ }
+ if in.FC != nil {
+ in, out := in.FC, &out.FC
+ *out = new(FCVolumeSource)
+ if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.AzureFile != nil {
+ in, out := in.AzureFile, &out.AzureFile
+ *out = new(AzureFileVolumeSource)
+ **out = *in
+ } else {
+ out.AzureFile = nil
+ }
+ if in.ConfigMap != nil {
+ in, out := in.ConfigMap, &out.ConfigMap
+ *out = new(ConfigMapVolumeSource)
+ if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ConfigMap = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := in.VsphereVolume, &out.VsphereVolume
+ *out = new(VsphereVirtualDiskVolumeSource)
+ **out = *in
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func DeepCopy_api_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error {
+ out.VolumePath = in.VolumePath
+ out.FSType = in.FSType
+ return nil
+}
+
+func DeepCopy_api_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error {
+ out.Weight = in.Weight
+ if err := DeepCopy_api_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil {
+ return err
+ }
+ return nil
+}
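
The generated DeepCopy_api_* helpers above all follow one pattern: plain value fields are assigned directly, while slices, maps and pointers are freshly allocated so the copy shares no memory with the original, and nested struct types are delegated to their own DeepCopy helpers. A minimal usage sketch follows; it assumes pkg/conversion exposes a NewCloner constructor (an assumption inferred from the c *conversion.Cloner parameter, not shown in this diff).

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/conversion"
    )

    func main() {
        in := api.Service{}
        in.ObjectMeta.Name = "frontend"
        in.Spec.Ports = []api.ServicePort{{Name: "http", Port: 80}}

        out := api.Service{}
        cloner := conversion.NewCloner() // assumed constructor for *conversion.Cloner
        if err := api.DeepCopy_api_Service(in, &out, cloner); err != nil {
            fmt.Println("deep copy failed:", err)
            return
        }

        // The copy owns its own Ports slice, so mutating it leaves the original intact.
        out.Spec.Ports[0].Port = 8080
        fmt.Println(in.Spec.Ports[0].Port, out.Spec.Ports[0].Port) // 80 8080
    }
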
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/doc.go
new file mode 100644
index 0000000..1507a88
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+// Package api contains the latest (or "internal") version of the
+// Kubernetes API objects. This is the API objects as represented in memory.
+// The contract presented to clients is located in the versioned packages,
+// which are sub-directories. The first one is "v1". Those packages
+// describe how a particular version is serialized to storage/network.
+package api
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go
new file mode 100644
index 0000000..792a253
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go
@@ -0,0 +1,238 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpoints
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/hex"
+ "hash"
+ "sort"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/types"
+ hashutil "k8s.io/kubernetes/pkg/util/hash"
+)
+
+const (
+ // TODO: to be deleted after v1.3 is released
+ // Its value is the json representation of map[string(IP)][HostRecord]
+ // example: '{"10.245.1.6":{"HostName":"my-webserver"}}'
+ PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map"
+)
+
+// TODO: to be deleted after v1.3 is released
+type HostRecord struct {
+ HostName string
+}
+
+// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full
+// representation, and then repacks that into the canonical layout. This
+// ensures that code which operates on these objects can rely on the common
+// form for things like comparison. The result is a newly allocated slice.
+func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
+ // First map each unique port definition to the sets of hosts that
+ // offer it.
+ allAddrs := map[addressKey]*api.EndpointAddress{}
+ portToAddrReadyMap := map[api.EndpointPort]addressSet{}
+ for i := range subsets {
+ for _, port := range subsets[i].Ports {
+ for k := range subsets[i].Addresses {
+ mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap)
+ }
+ for k := range subsets[i].NotReadyAddresses {
+ mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap)
+ }
+ }
+ }
+
+ // Next, map the sets of hosts to the sets of ports they offer.
+ // Go does not allow maps or slices as keys to maps, so we have
+ // to synthesize an artificial key and do a sort of 2-part
+ // associative entity.
+ type keyString string
+ keyToAddrReadyMap := map[keyString]addressSet{}
+ addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{}
+ for port, addrs := range portToAddrReadyMap {
+ key := keyString(hashAddresses(addrs))
+ keyToAddrReadyMap[key] = addrs
+ addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port)
+ }
+
+ // Next, build the N-to-M association the API wants.
+ final := []api.EndpointSubset{}
+ for key, ports := range addrReadyMapKeyToPorts {
+ var readyAddrs, notReadyAddrs []api.EndpointAddress
+ for addr, ready := range keyToAddrReadyMap[key] {
+ if ready {
+ readyAddrs = append(readyAddrs, *addr)
+ } else {
+ notReadyAddrs = append(notReadyAddrs, *addr)
+ }
+ }
+ final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports})
+ }
+
+ // Finally, sort it.
+ return SortSubsets(final)
+}
+
+// The sets of hosts must be de-duped, using IP+UID as the key.
+type addressKey struct {
+ ip string
+ uid types.UID
+}
+
+// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving
+// any existing ready state.
+func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress {
+ // use addressKey to distinguish between two endpoints that are identical addresses
+ // but may have come from different hosts, for attribution. For instance, Mesos
+ // assigns pods the node IP, but the pods are distinct.
+ key := addressKey{ip: addr.IP}
+ if addr.TargetRef != nil {
+ key.uid = addr.TargetRef.UID
+ }
+
+ // Accumulate the address. The full EndpointAddress structure is preserved for use when
+ // we rebuild the subsets so that the final TargetRef has all of the necessary data.
+ existingAddress := allAddrs[key]
+ if existingAddress == nil {
+ // Make a copy so we don't write to the
+ // input args of this function.
+ existingAddress = &api.EndpointAddress{}
+ *existingAddress = *addr
+ allAddrs[key] = existingAddress
+ }
+
+ // Remember that this port maps to this address.
+ if _, found := portToAddrReadyMap[port]; !found {
+ portToAddrReadyMap[port] = addressSet{}
+ }
+ // if we have not yet recorded this port for this address, or if the previous
+ // state was ready, write the current ready state. not ready always trumps
+ // ready.
+ if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady {
+ portToAddrReadyMap[port][existingAddress] = ready
+ }
+ return existingAddress
+}
+
+type addressSet map[*api.EndpointAddress]bool
+
+type addrReady struct {
+ addr *api.EndpointAddress
+ ready bool
+}
+
+func hashAddresses(addrs addressSet) string {
+ // Flatten the list of addresses into a string so it can be used as a
+ // map key. Unfortunately, DeepHashObject is implemented in terms of
+ // spew, and spew does not handle non-primitive map keys well. So
+ // first we collapse it into a slice, sort the slice, then hash that.
+ slice := make([]addrReady, 0, len(addrs))
+ for k, ready := range addrs {
+ slice = append(slice, addrReady{k, ready})
+ }
+ sort.Sort(addrsReady(slice))
+ hasher := md5.New()
+ hashutil.DeepHashObject(hasher, slice)
+ return hex.EncodeToString(hasher.Sum(nil)[0:])
+}
+
+func lessAddrReady(a, b addrReady) bool {
+ // ready is not significant to hashing since we can't have duplicate addresses
+ return LessEndpointAddress(a.addr, b.addr)
+}
+
+type addrsReady []addrReady
+
+func (sl addrsReady) Len() int { return len(sl) }
+func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl addrsReady) Less(i, j int) bool {
+ return lessAddrReady(sl[i], sl[j])
+}
+
+func LessEndpointAddress(a, b *api.EndpointAddress) bool {
+ ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP))
+ if ipComparison != 0 {
+ return ipComparison < 0
+ }
+ if b.TargetRef == nil {
+ return false
+ }
+ if a.TargetRef == nil {
+ return true
+ }
+ return a.TargetRef.UID < b.TargetRef.UID
+}
+
+type addrPtrsByIpAndUID []*api.EndpointAddress
+
+func (sl addrPtrsByIpAndUID) Len() int { return len(sl) }
+func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl addrPtrsByIpAndUID) Less(i, j int) bool {
+ return LessEndpointAddress(sl[i], sl[j])
+}
+
+// SortSubsets sorts an array of EndpointSubset objects in place. For ease of
+// use it returns the input slice.
+func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
+ for i := range subsets {
+ ss := &subsets[i]
+ sort.Sort(addrsByIpAndUID(ss.Addresses))
+ sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses))
+ sort.Sort(portsByHash(ss.Ports))
+ }
+ sort.Sort(subsetsByHash(subsets))
+ return subsets
+}
+
+func hashObject(hasher hash.Hash, obj interface{}) []byte {
+ hashutil.DeepHashObject(hasher, obj)
+ return hasher.Sum(nil)
+}
+
+type subsetsByHash []api.EndpointSubset
+
+func (sl subsetsByHash) Len() int { return len(sl) }
+func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl subsetsByHash) Less(i, j int) bool {
+ hasher := md5.New()
+ h1 := hashObject(hasher, sl[i])
+ h2 := hashObject(hasher, sl[j])
+ return bytes.Compare(h1, h2) < 0
+}
+
+type addrsByIpAndUID []api.EndpointAddress
+
+func (sl addrsByIpAndUID) Len() int { return len(sl) }
+func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl addrsByIpAndUID) Less(i, j int) bool {
+ return LessEndpointAddress(&sl[i], &sl[j])
+}
+
+type portsByHash []api.EndpointPort
+
+func (sl portsByHash) Len() int { return len(sl) }
+func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl portsByHash) Less(i, j int) bool {
+ hasher := md5.New()
+ h1 := hashObject(hasher, sl[i])
+ h2 := hashObject(hasher, sl[j])
+ return bytes.Compare(h1, h2) < 0
+}
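
As a rough illustration of the repacking performed by RepackSubsets (the addresses and ports below are made up for the example, not taken from this repository): two subsets that resolve to the same address set are folded into a single canonical subset carrying both ports.

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/endpoints"
    )

    func main() {
        subsets := []api.EndpointSubset{
            {
                Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}},
                Ports:     []api.EndpointPort{{Name: "http", Port: 80}},
            },
            {
                Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}},
                Ports:     []api.EndpointPort{{Name: "metrics", Port: 9090}},
            },
        }

        packed := endpoints.RepackSubsets(subsets)
        // Both ports are offered by the same single address, so they end up
        // in one subset in the canonical, sorted layout.
        fmt.Println(len(packed), len(packed[0].Ports)) // 1 2
    }
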
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go
new file mode 100644
index 0000000..58751ed
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors provides detailed error types for api field validation.
+package errors
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go
new file mode 100644
index 0000000..a51fe94
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go
@@ -0,0 +1,456 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// HTTP Status codes not in the golang http package.
+const (
+ StatusUnprocessableEntity = 422
+ StatusTooManyRequests = 429
+ // HTTP recommendations are for servers to define 5xx error codes
+ // for scenarios not covered by defined behavior. In this case, ServerTimeout
+ // is an indication that a transient server error has occurred and the
+ // client *should* retry, with an optional Retry-After header to specify
+ // the back off window.
+ StatusServerTimeout = 504
+)
+
+// StatusError is an error intended for consumption by a REST API server; it can also be
+// reconstructed by clients from a REST response. Public to allow easy type switches.
+type StatusError struct {
+ ErrStatus unversioned.Status
+}
+
+// APIStatus is exposed by errors that can be converted to an api.Status object
+// for finer grained details.
+type APIStatus interface {
+ Status() unversioned.Status
+}
+
+var _ error = &StatusError{}
+
+// Error implements the Error interface.
+func (e *StatusError) Error() string {
+ return e.ErrStatus.Message
+}
+
+// Status allows access to e's status without having to know the detailed workings
+// of StatusError. Used by pkg/apiserver.
+func (e *StatusError) Status() unversioned.Status {
+ return e.ErrStatus
+}
+
+// DebugError reports extended info about the error to debug output.
+func (e *StatusError) DebugError() (string, []interface{}) {
+ if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil {
+ return "server response object: %s", []interface{}{string(out)}
+ }
+ return "server response object: %#v", []interface{}{e.ErrStatus}
+}
+
+// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
+type UnexpectedObjectError struct {
+ Object runtime.Object
+}
+
+// Error returns an error message describing 'u'.
+func (u *UnexpectedObjectError) Error() string {
+ return fmt.Sprintf("unexpected object: %v", u.Object)
+}
+
+// FromObject generates a StatusError from an unversioned.Status, if that is the type of obj; otherwise,
+// returns an UnexpectedObjectError.
+func FromObject(obj runtime.Object) error {
+ switch t := obj.(type) {
+ case *unversioned.Status:
+ return &StatusError{*t}
+ }
+ return &UnexpectedObjectError{obj}
+}
+
+// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
+func NewNotFound(qualifiedResource unversioned.GroupResource, name string) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusNotFound,
+ Reason: unversioned.StatusReasonNotFound,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ Name: name,
+ },
+ Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name),
+ }}
+}
+
+// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
+func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusConflict,
+ Reason: unversioned.StatusReasonAlreadyExists,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ Name: name,
+ },
+ Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name),
+ }}
+}
+
+// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
+// action.
+func NewUnauthorized(reason string) *StatusError {
+ message := reason
+ if len(message) == 0 {
+ message = "not authorized"
+ }
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusUnauthorized,
+ Reason: unversioned.StatusReasonUnauthorized,
+ Message: message,
+ }}
+}
+
+// NewForbidden returns an error indicating the requested action was forbidden
+func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusForbidden,
+ Reason: unversioned.StatusReasonForbidden,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ Name: name,
+ },
+ Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err),
+ }}
+}
+
+// NewConflict returns an error indicating the item can't be updated as provided.
+func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusConflict,
+ Reason: unversioned.StatusReasonConflict,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ Name: name,
+ },
+ Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err),
+ }}
+}
+
+// NewGone returns an error indicating the item is no longer available at the server and no forwarding address is known.
+func NewGone(message string) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusGone,
+ Reason: unversioned.StatusReasonGone,
+ Message: message,
+ }}
+}
+
+// NewInvalid returns an error indicating the item is invalid and cannot be processed.
+func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) *StatusError {
+ causes := make([]unversioned.StatusCause, 0, len(errs))
+ for i := range errs {
+ err := errs[i]
+ causes = append(causes, unversioned.StatusCause{
+ Type: unversioned.CauseType(err.Type),
+ Message: err.ErrorBody(),
+ Field: err.Field,
+ })
+ }
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity
+ Reason: unversioned.StatusReasonInvalid,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedKind.Group,
+ Kind: qualifiedKind.Kind,
+ Name: name,
+ Causes: causes,
+ },
+ Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()),
+ }}
+}
+
+// NewBadRequest creates an error that indicates that the request is invalid and cannot be processed.
+func NewBadRequest(reason string) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusBadRequest,
+ Reason: unversioned.StatusReasonBadRequest,
+ Message: reason,
+ }}
+}
+
+// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
+func NewServiceUnavailable(reason string) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusServiceUnavailable,
+ Reason: unversioned.StatusReasonServiceUnavailable,
+ Message: reason,
+ }}
+}
+
+// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
+func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusMethodNotAllowed,
+ Reason: unversioned.StatusReasonMethodNotAllowed,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ },
+ Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()),
+ }}
+}
+
+// NewServerTimeout returns an error indicating the requested action could not be completed due to a
+// transient error, and the client should try again.
+func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusInternalServerError,
+ Reason: unversioned.StatusReasonServerTimeout,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ Name: operation,
+ RetryAfterSeconds: int32(retryAfterSeconds),
+ },
+ Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()),
+ }}
+}
+
+// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we
+// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this.
+func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) *StatusError {
+ return NewServerTimeout(unversioned.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
+}
+
+// NewInternalError returns an error indicating that an internal server error occurred; the given error is recorded in the status message and causes.
+func NewInternalError(err error) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: http.StatusInternalServerError,
+ Reason: unversioned.StatusReasonInternalError,
+ Details: &unversioned.StatusDetails{
+ Causes: []unversioned.StatusCause{{Message: err.Error()}},
+ },
+ Message: fmt.Sprintf("Internal error occurred: %v", err),
+ }}
+}
+
+// NewTimeoutError returns an error indicating that a timeout occurred before the request
+// could be completed. Clients may retry, but the operation may still complete.
+func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: StatusServerTimeout,
+ Reason: unversioned.StatusReasonTimeout,
+ Message: fmt.Sprintf("Timeout: %s", message),
+ Details: &unversioned.StatusDetails{
+ RetryAfterSeconds: int32(retryAfterSeconds),
+ },
+ }}
+}
+
+// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
+func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
+ reason := unversioned.StatusReasonUnknown
+ message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
+ switch code {
+ case http.StatusConflict:
+ if verb == "POST" {
+ reason = unversioned.StatusReasonAlreadyExists
+ } else {
+ reason = unversioned.StatusReasonConflict
+ }
+ message = "the server reported a conflict"
+ case http.StatusNotFound:
+ reason = unversioned.StatusReasonNotFound
+ message = "the server could not find the requested resource"
+ case http.StatusBadRequest:
+ reason = unversioned.StatusReasonBadRequest
+ message = "the server rejected our request for an unknown reason"
+ case http.StatusUnauthorized:
+ reason = unversioned.StatusReasonUnauthorized
+ message = "the server has asked for the client to provide credentials"
+ case http.StatusForbidden:
+ reason = unversioned.StatusReasonForbidden
+ message = "the server does not allow access to the requested resource"
+ case http.StatusMethodNotAllowed:
+ reason = unversioned.StatusReasonMethodNotAllowed
+ message = "the server does not allow this method on the requested resource"
+ case StatusUnprocessableEntity:
+ reason = unversioned.StatusReasonInvalid
+ message = "the server rejected our request due to an error in our request"
+ case StatusServerTimeout:
+ reason = unversioned.StatusReasonServerTimeout
+ message = "the server cannot complete the requested operation at this time, try again later"
+ case StatusTooManyRequests:
+ reason = unversioned.StatusReasonTimeout
+ message = "the server has received too many requests and has asked us to try again later"
+ default:
+ if code >= 500 {
+ reason = unversioned.StatusReasonInternalError
+ message = "an error on the server has prevented the request from succeeding"
+ }
+ }
+ switch {
+ case !qualifiedResource.IsEmpty() && len(name) > 0:
+ message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name)
+ case !qualifiedResource.IsEmpty():
+ message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String())
+ }
+ var causes []unversioned.StatusCause
+ if isUnexpectedResponse {
+ causes = []unversioned.StatusCause{
+ {
+ Type: unversioned.CauseTypeUnexpectedServerResponse,
+ Message: serverMessage,
+ },
+ }
+ } else {
+ causes = nil
+ }
+ return &StatusError{unversioned.Status{
+ Status: unversioned.StatusFailure,
+ Code: int32(code),
+ Reason: reason,
+ Details: &unversioned.StatusDetails{
+ Group: qualifiedResource.Group,
+ Kind: qualifiedResource.Resource,
+ Name: name,
+
+ Causes: causes,
+ RetryAfterSeconds: int32(retryAfterSeconds),
+ },
+ Message: message,
+ }}
+}
+
+// IsNotFound returns true if the specified error was created by NewNotFound.
+func IsNotFound(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonNotFound
+}
+
+// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
+func IsAlreadyExists(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonAlreadyExists
+}
+
+// IsConflict determines if the err is an error which indicates the provided update conflicts.
+func IsConflict(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonConflict
+}
+
+// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
+func IsInvalid(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonInvalid
+}
+
+// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
+// be performed because it is not supported by the server.
+func IsMethodNotSupported(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonMethodNotAllowed
+}
+
+// IsBadRequest determines if err is an error which indicates that the request is invalid.
+func IsBadRequest(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonBadRequest
+}
+
+// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
+// requires authentication by the user.
+func IsUnauthorized(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonUnauthorized
+}
+
+// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
+// be completed as requested.
+func IsForbidden(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonForbidden
+}
+
+// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
+// by the client.
+func IsServerTimeout(err error) bool {
+ return reasonForError(err) == unversioned.StatusReasonServerTimeout
+}
+
+// IsUnexpectedServerError returns true if the server response was not in the expected API format,
+// and may be the result of another HTTP actor.
+func IsUnexpectedServerError(err error) bool {
+ switch t := err.(type) {
+ case APIStatus:
+ if d := t.Status().Details; d != nil {
+ for _, cause := range d.Causes {
+ if cause.Type == unversioned.CauseTypeUnexpectedServerResponse {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
+func IsUnexpectedObjectError(err error) bool {
+ _, ok := err.(*UnexpectedObjectError)
+ return err != nil && ok
+}
+
+// SuggestsClientDelay returns true if this error suggests a client delay as well as the
+// suggested seconds to wait, or false if the error does not imply a wait.
+func SuggestsClientDelay(err error) (int, bool) {
+ switch t := err.(type) {
+ case APIStatus:
+ if t.Status().Details != nil {
+ switch t.Status().Reason {
+ case unversioned.StatusReasonServerTimeout, unversioned.StatusReasonTimeout:
+ return int(t.Status().Details.RetryAfterSeconds), true
+ }
+ }
+ }
+ return 0, false
+}
+
+func reasonForError(err error) unversioned.StatusReason {
+ switch t := err.(type) {
+ case APIStatus:
+ return t.Status().Reason
+ }
+ return unversioned.StatusReasonUnknown
+}
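
The Is* and SuggestsClientDelay helpers are what callers use instead of inspecting StatusError fields directly. A hedged sketch of the intended pattern; the NewNotFound call below merely stands in for an error returned by a real API request.

    package main

    import (
        "fmt"
        "time"

        "k8s.io/kubernetes/pkg/api/errors"
        "k8s.io/kubernetes/pkg/api/unversioned"
    )

    func main() {
        // Simulate a failure the way a server built on this package would construct it.
        err := errors.NewNotFound(unversioned.GroupResource{Resource: "services"}, "frontend")

        switch {
        case errors.IsNotFound(err):
            fmt.Println("service does not exist yet; safe to create it")
        case errors.IsAlreadyExists(err) || errors.IsConflict(err):
            fmt.Println("re-read the current object and retry")
        default:
            if seconds, ok := errors.SuggestsClientDelay(err); ok {
                time.Sleep(time.Duration(seconds) * time.Second) // back off as advised
            }
            fmt.Println("unhandled error:", err)
        }
    }
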
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/field_constants.go
new file mode 100644
index 0000000..5ead0f1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/field_constants.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+// Field path constants that are specific to the internal API
+// representation.
+const (
+ NodeUnschedulableField = "spec.unschedulable"
+ ObjectNameField = "metadata.name"
+ PodHostField = "spec.nodeName"
+ PodStatusField = "status.phase"
+ SecretTypeField = "type"
+
+ EventReasonField = "reason"
+ EventSourceField = "source"
+ EventTypeField = "type"
+ EventInvolvedKindField = "involvedObject.kind"
+ EventInvolvedNamespaceField = "involvedObject.namespace"
+ EventInvolvedNameField = "involvedObject.name"
+ EventInvolvedUIDField = "involvedObject.uid"
+ EventInvolvedAPIVersionField = "involvedObject.apiVersion"
+ EventInvolvedResourceVersionField = "involvedObject.resourceVersion"
+ EventInvolvedFieldPathField = "involvedObject.fieldPath"
+)
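
These constants name the field paths accepted by field selectors against the internal API. A small sketch of how one might be used; fields.OneTermEqualSelector is the helper used elsewhere in this vendored tree.

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/fields"
    )

    func main() {
        // Select pods scheduled onto a specific node via the internal field path.
        sel := fields.OneTermEqualSelector(api.PodHostField, "node-1")
        fmt.Println(sel.String()) // spec.nodeName=node-1
    }
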
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/generate.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/generate.go
new file mode 100644
index 0000000..19379d3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/generate.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "fmt"
+
+ utilrand "k8s.io/kubernetes/pkg/util/rand"
+)
+
+// NameGenerator generates names for objects. Some backends may have more information
+// available to guide selection of new names and this interface hides those details.
+type NameGenerator interface {
+ // GenerateName generates a valid name from the base name, adding a random suffix to
+ // the base. If base is valid, the returned name must also be valid. The generator is
+ // responsible for knowing the maximum valid name length.
+ GenerateName(base string) string
+}
+
+// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if
+// necessary. It expects that validation for ObjectMeta has already completed (that Base is a
+// valid name) and that the NameGenerator generates a name that is also valid.
+func GenerateName(u NameGenerator, meta *ObjectMeta) {
+ if len(meta.GenerateName) == 0 || len(meta.Name) != 0 {
+ return
+ }
+ meta.Name = u.GenerateName(meta.GenerateName)
+}
+
+// simpleNameGenerator generates random names.
+type simpleNameGenerator struct{}
+
+// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics
+// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes
+// name (63 characters)
+var SimpleNameGenerator NameGenerator = simpleNameGenerator{}
+
+const (
+ // TODO: make this flexible for non-core resources with alternate naming rules.
+ maxNameLength = 63
+ randomLength = 5
+ maxGeneratedNameLength = maxNameLength - randomLength
+)
+
+func (simpleNameGenerator) GenerateName(base string) string {
+ if len(base) > maxGeneratedNameLength {
+ base = base[:maxGeneratedNameLength]
+ }
+ return fmt.Sprintf("%s%s", base, utilrand.String(randomLength))
+}
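
A short sketch of the generator in use: when ObjectMeta.Name is empty and GenerateName is set, a five-character random suffix is appended; a pre-set Name is left untouched.

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
    )

    func main() {
        meta := api.ObjectMeta{GenerateName: "kube2msb-"}
        api.GenerateName(api.SimpleNameGenerator, &meta)
        fmt.Println(meta.Name) // e.g. "kube2msb-x7b2q" (random five-character suffix)

        fixed := api.ObjectMeta{Name: "kube2msb", GenerateName: "kube2msb-"}
        api.GenerateName(api.SimpleNameGenerator, &fixed)
        fmt.Println(fixed.Name) // "kube2msb" (unchanged)
    }
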
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/helpers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/helpers.go
new file mode 100644
index 0000000..ca873b3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/helpers.go
@@ -0,0 +1,502 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util/sets"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// ConversionError conveniently packages up errors in conversions.
+type ConversionError struct {
+ In, Out interface{}
+ Message string
+}
+
+// Error returns a helpful string describing the conversion error.
+func (c *ConversionError) Error() string {
+ return spew.Sprintf(
+ "Conversion error: %s. (in: %v(%+v) out: %v)",
+ c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out),
+ )
+}
+
+// Semantic can do semantic deep equality checks for api objects.
+// Example: api.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
+var Semantic = conversion.EqualitiesOrDie(
+ func(a, b resource.Quantity) bool {
+ // Ignore formatting, only care that numeric value stayed the same.
+ // TODO: if we decide it's important, it should be safe to start comparing the format.
+ //
+ // Uninitialized quantities are equivalent to 0 quantities.
+ return a.Cmp(b) == 0
+ },
+ func(a, b unversioned.Time) bool {
+ return a.UTC() == b.UTC()
+ },
+ func(a, b labels.Selector) bool {
+ return a.String() == b.String()
+ },
+ func(a, b fields.Selector) bool {
+ return a.String() == b.String()
+ },
+)
+
+var standardResourceQuotaScopes = sets.NewString(
+ string(ResourceQuotaScopeTerminating),
+ string(ResourceQuotaScopeNotTerminating),
+ string(ResourceQuotaScopeBestEffort),
+ string(ResourceQuotaScopeNotBestEffort),
+)
+
+// IsStandardResourceQuotaScope returns true if the scope is a standard value
+func IsStandardResourceQuotaScope(str string) bool {
+ return standardResourceQuotaScopes.Has(str)
+}
+
+var podObjectCountQuotaResources = sets.NewString(
+ string(ResourcePods),
+)
+
+var podComputeQuotaResources = sets.NewString(
+ string(ResourceCPU),
+ string(ResourceMemory),
+ string(ResourceLimitsCPU),
+ string(ResourceLimitsMemory),
+ string(ResourceRequestsCPU),
+ string(ResourceRequestsMemory),
+)
+
+// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
+func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool {
+ switch scope {
+ case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort:
+ return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
+ case ResourceQuotaScopeBestEffort:
+ return podObjectCountQuotaResources.Has(resource)
+ default:
+ return true
+ }
+}
+
+var standardContainerResources = sets.NewString(
+ string(ResourceCPU),
+ string(ResourceMemory),
+)
+
+// IsStandardContainerResourceName returns true if the container can make a resource request
+// for the specified resource
+func IsStandardContainerResourceName(str string) bool {
+ return standardContainerResources.Has(str)
+}
+
+var standardLimitRangeTypes = sets.NewString(
+ string(LimitTypePod),
+ string(LimitTypeContainer),
+)
+
+// IsStandardLimitRangeType returns true if the type is Pod or Container
+func IsStandardLimitRangeType(str string) bool {
+ return standardLimitRangeTypes.Has(str)
+}
+
+var standardQuotaResources = sets.NewString(
+ string(ResourceCPU),
+ string(ResourceMemory),
+ string(ResourceRequestsCPU),
+ string(ResourceRequestsMemory),
+ string(ResourceLimitsCPU),
+ string(ResourceLimitsMemory),
+ string(ResourcePods),
+ string(ResourceQuotas),
+ string(ResourceServices),
+ string(ResourceReplicationControllers),
+ string(ResourceSecrets),
+ string(ResourcePersistentVolumeClaims),
+ string(ResourceConfigMaps),
+ string(ResourceServicesNodePorts),
+ string(ResourceServicesLoadBalancers),
+)
+
+// IsStandardQuotaResourceName returns true if the resource is known to
+// the quota tracking system
+func IsStandardQuotaResourceName(str string) bool {
+ return standardQuotaResources.Has(str)
+}
+
+var standardResources = sets.NewString(
+ string(ResourceCPU),
+ string(ResourceMemory),
+ string(ResourceRequestsCPU),
+ string(ResourceRequestsMemory),
+ string(ResourceLimitsCPU),
+ string(ResourceLimitsMemory),
+ string(ResourcePods),
+ string(ResourceQuotas),
+ string(ResourceServices),
+ string(ResourceReplicationControllers),
+ string(ResourceSecrets),
+ string(ResourceConfigMaps),
+ string(ResourcePersistentVolumeClaims),
+ string(ResourceStorage),
+)
+
+// IsStandardResourceName returns true if the resource is known to the system
+func IsStandardResourceName(str string) bool {
+ return standardResources.Has(str)
+}
+
+var integerResources = sets.NewString(
+ string(ResourcePods),
+ string(ResourceQuotas),
+ string(ResourceServices),
+ string(ResourceReplicationControllers),
+ string(ResourceSecrets),
+ string(ResourceConfigMaps),
+ string(ResourcePersistentVolumeClaims),
+ string(ResourceServicesNodePorts),
+ string(ResourceServicesLoadBalancers),
+)
+
+// IsIntegerResourceName returns true if the resource is measured in integer values
+func IsIntegerResourceName(str string) bool {
+ return integerResources.Has(str)
+}
+
+// NewDeleteOptions returns a DeleteOptions indicating the resource should
+// be deleted within the specified grace period. Use zero to indicate
+// immediate deletion. If you would prefer to use the default grace period,
+// use &api.DeleteOptions{} directly.
+func NewDeleteOptions(grace int64) *DeleteOptions {
+ return &DeleteOptions{GracePeriodSeconds: &grace}
+}
+
+// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set.
+func NewPreconditionDeleteOptions(uid string) *DeleteOptions {
+ u := types.UID(uid)
+ p := Preconditions{UID: &u}
+ return &DeleteOptions{Preconditions: &p}
+}
+
+// NewUIDPreconditions returns a Preconditions with UID set.
+func NewUIDPreconditions(uid string) *Preconditions {
+ u := types.UID(uid)
+ return &Preconditions{UID: &u}
+}
+
+// IsServiceIPSet checks whether the service's ClusterIP is set.
+// It does not attempt to validate the value.
+func IsServiceIPSet(service *Service) bool {
+ return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != ""
+}
+
+// IsServiceIPRequested checks whether a cluster IP is being requested for the service (i.e. ClusterIP is empty).
+func IsServiceIPRequested(service *Service) bool {
+ return service.Spec.ClusterIP == ""
+}
+
+var standardFinalizers = sets.NewString(
+ string(FinalizerKubernetes),
+ FinalizerOrphan,
+)
+
+func IsStandardFinalizerName(str string) bool {
+ return standardFinalizers.Has(str)
+}
+
+// SingleObject returns a ListOptions for watching a single object.
+func SingleObject(meta ObjectMeta) ListOptions {
+ return ListOptions{
+ FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name),
+ ResourceVersion: meta.ResourceVersion,
+ }
+}
+
+// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice,
+// only if they do not already exist
+func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) {
+ for _, add := range addAddresses {
+ exists := false
+ for _, existing := range *addresses {
+ if existing.Address == add.Address && existing.Type == add.Type {
+ exists = true
+ break
+ }
+ }
+ if !exists {
+ *addresses = append(*addresses, add)
+ }
+ }
+}
+
+func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) {
+ data, err := runtime.Encode(codec, obj)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%x", md5.Sum(data)), nil
+}
+
+// TODO: make method on LoadBalancerStatus?
+func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool {
+ return ingressSliceEqual(l.Ingress, r.Ingress)
+}
+
+func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool {
+ if len(lhs) != len(rhs) {
+ return false
+ }
+ for i := range lhs {
+ if !ingressEqual(&lhs[i], &rhs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func ingressEqual(lhs, rhs *LoadBalancerIngress) bool {
+ if lhs.IP != rhs.IP {
+ return false
+ }
+ if lhs.Hostname != rhs.Hostname {
+ return false
+ }
+ return true
+}
+
+// TODO: make method on LoadBalancerStatus?
+func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus {
+ c := &LoadBalancerStatus{}
+ c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress))
+ for i := range lb.Ingress {
+ c.Ingress[i] = lb.Ingress[i]
+ }
+ return c
+}
+
+// GetAccessModesAsString returns a string representation of an array of access modes.
+// modes, when present, are always in the same order: RWO,ROX,RWX.
+func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string {
+ modes = removeDuplicateAccessModes(modes)
+ modesStr := []string{}
+ if containsAccessMode(modes, ReadWriteOnce) {
+ modesStr = append(modesStr, "RWO")
+ }
+ if containsAccessMode(modes, ReadOnlyMany) {
+ modesStr = append(modesStr, "ROX")
+ }
+ if containsAccessMode(modes, ReadWriteMany) {
+ modesStr = append(modesStr, "RWX")
+ }
+ return strings.Join(modesStr, ",")
+}
+
+// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
+func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode {
+ strmodes := strings.Split(modes, ",")
+ accessModes := []PersistentVolumeAccessMode{}
+ for _, s := range strmodes {
+ s = strings.Trim(s, " ")
+ switch {
+ case s == "RWO":
+ accessModes = append(accessModes, ReadWriteOnce)
+ case s == "ROX":
+ accessModes = append(accessModes, ReadOnlyMany)
+ case s == "RWX":
+ accessModes = append(accessModes, ReadWriteMany)
+ }
+ }
+ return accessModes
+}
+
+// removeDuplicateAccessModes returns an array of access modes without any duplicates
+func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode {
+ accessModes := []PersistentVolumeAccessMode{}
+ for _, m := range modes {
+ if !containsAccessMode(accessModes, m) {
+ accessModes = append(accessModes, m)
+ }
+ }
+ return accessModes
+}
+
+func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool {
+ for _, m := range modes {
+ if m == mode {
+ return true
+ }
+ }
+ return false
+}
+
+// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format.
+func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) {
+ if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil {
+ return unversioned.Time{Time: t}, nil
+ }
+ t, err := time.Parse(time.RFC3339, s)
+ if err != nil {
+ return unversioned.Time{}, err
+ }
+ return unversioned.Time{Time: t}, nil
+}
+
+// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
+// labels.Selector.
+func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) {
+ if len(nsm) == 0 {
+ return labels.Nothing(), nil
+ }
+ selector := labels.NewSelector()
+ for _, expr := range nsm {
+ var op labels.Operator
+ switch expr.Operator {
+ case NodeSelectorOpIn:
+ op = labels.InOperator
+ case NodeSelectorOpNotIn:
+ op = labels.NotInOperator
+ case NodeSelectorOpExists:
+ op = labels.ExistsOperator
+ case NodeSelectorOpDoesNotExist:
+ op = labels.DoesNotExistOperator
+ case NodeSelectorOpGt:
+ op = labels.GreaterThanOperator
+ case NodeSelectorOpLt:
+ op = labels.LessThanOperator
+ default:
+ return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
+ }
+ r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))
+ if err != nil {
+ return nil, err
+ }
+ selector = selector.Add(*r)
+ }
+ return selector, nil
+}
+
+const (
+ // AffinityAnnotationKey represents the key of affinity data (json serialized)
+ // in the Annotations of a Pod.
+ AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
+
+ // TolerationsAnnotationKey represents the key of tolerations data (json serialized)
+ // in the Annotations of a Pod.
+ TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
+
+ // TaintsAnnotationKey represents the key of taints data (json serialized)
+ // in the Annotations of a Node.
+ TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
+
+ // SeccompPodAnnotationKey represents the key of a seccomp profile applied
+ // to all containers of a pod.
+ SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
+
+ // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
+ // to one container of a pod.
+ SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+
+ // CreatedByAnnotation represents the key used to store the spec(json)
+ // used to create the resource.
+ CreatedByAnnotation = "kubernetes.io/created-by"
+)
+
+// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
+// and converts it to the Affinity type in api.
+func GetAffinityFromPodAnnotations(annotations map[string]string) (Affinity, error) {
+ var affinity Affinity
+ if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" {
+ err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity)
+ if err != nil {
+ return affinity, err
+ }
+ }
+ return affinity, nil
+}
+
+// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
+// and converts it to the []Toleration type in api.
+func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) {
+ var tolerations []Toleration
+ if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" {
+ err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations)
+ if err != nil {
+ return tolerations, err
+ }
+ }
+ return tolerations, nil
+}
+
+// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
+// and converts it to the []Taint type in api.
+func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) {
+ var taints []Taint
+ if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" {
+ err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints)
+ if err != nil {
+ return []Taint{}, err
+ }
+ }
+ return taints, nil
+}
+
+// TolerationToleratesTaint checks if the toleration tolerates the taint.
+func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool {
+ if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
+ return false
+ }
+
+ if toleration.Key != taint.Key {
+ return false
+ }
+ // TODO: Use proper defaulting when Toleration becomes a field of PodSpec
+ if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value {
+ return true
+ }
+ if toleration.Operator == TolerationOpExists {
+ return true
+ }
+ return false
+
+}
+
+// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
+func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool {
+ tolerated := false
+ for i := range tolerations {
+ if TolerationToleratesTaint(&tolerations[i], taint) {
+ tolerated = true
+ break
+ }
+ }
+ return tolerated
+}
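
Two of the helpers above in action, as a rough sketch (the taint key and value are illustrative only):

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
    )

    func main() {
        // Access modes round-trip through the canonical "RWO,ROX,RWX" string form,
        // with duplicates removed.
        modes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteOnce}
        s := api.GetAccessModesAsString(modes)
        fmt.Println(s)                                    // RWO,ROX
        fmt.Println(len(api.GetAccessModesFromString(s))) // 2

        // A toleration with operator Exists and a matching key tolerates the taint.
        taint := api.Taint{Key: "dedicated", Value: "msb", Effect: api.TaintEffectNoSchedule}
        tol := api.Toleration{Key: "dedicated", Operator: api.TolerationOpExists}
        fmt.Println(api.TolerationToleratesTaint(&tol, &taint)) // true
    }
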
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/install/install.go
new file mode 100644
index 0000000..937920d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/install/install.go
@@ -0,0 +1,251 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the v1 monolithic api, making it available as an
+// option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/api"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", api.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+// userResources is a group of resources mostly used by a kubectl user
+var userResources = []string{"rc", "svc", "pods", "pvc"}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString(
+ "Node",
+ "Namespace",
+ "PersistentVolume",
+ "ComponentStatus",
+ )
+
+ // these kinds should be excluded from the list of resources
+ ignoredKinds := sets.NewString(
+ "ListOptions",
+ "DeleteOptions",
+ "Status",
+ "PodLogOptions",
+ "PodExecOptions",
+ "PodAttachOptions",
+ "PodProxyOptions",
+ "NodeProxyOptions",
+ "ServiceProxyOptions",
+ "ThirdPartyResource",
+ "ThirdPartyResourceData",
+ "ThirdPartyResourceList")
+
+ mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+ // setup aliases for groups of resources
+ mapper.AddResourceAlias("all", userResources...)
+
+ return mapper
+}
+
+// interfacesFor returns the default ObjectConvertor and MetadataAccessor for a given
+// version string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(api.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ api.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1.SchemeGroupVersion:
+ v1.AddToScheme(api.Scheme)
+ }
+ }
+
+ // This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are
+ // converted the most in the cluster.
+ // TODO: generate one of these for every external API group - this is to prove the impact
+ api.Scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) {
+ switch a := objA.(type) {
+ case *v1.Pod:
+ switch b := objB.(type) {
+ case *api.Pod:
+ return true, v1.Convert_v1_Pod_To_api_Pod(a, b, s)
+ }
+ case *api.Pod:
+ switch b := objB.(type) {
+ case *v1.Pod:
+ return true, v1.Convert_api_Pod_To_v1_Pod(a, b, s)
+ }
+
+ case *v1.Event:
+ switch b := objB.(type) {
+ case *api.Event:
+ return true, v1.Convert_v1_Event_To_api_Event(a, b, s)
+ }
+ case *api.Event:
+ switch b := objB.(type) {
+ case *v1.Event:
+ return true, v1.Convert_api_Event_To_v1_Event(a, b, s)
+ }
+
+ case *v1.ReplicationController:
+ switch b := objB.(type) {
+ case *api.ReplicationController:
+ return true, v1.Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s)
+ }
+ case *api.ReplicationController:
+ switch b := objB.(type) {
+ case *v1.ReplicationController:
+ return true, v1.Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s)
+ }
+
+ case *v1.Node:
+ switch b := objB.(type) {
+ case *api.Node:
+ return true, v1.Convert_v1_Node_To_api_Node(a, b, s)
+ }
+ case *api.Node:
+ switch b := objB.(type) {
+ case *v1.Node:
+ return true, v1.Convert_api_Node_To_v1_Node(a, b, s)
+ }
+
+ case *v1.Namespace:
+ switch b := objB.(type) {
+ case *api.Namespace:
+ return true, v1.Convert_v1_Namespace_To_api_Namespace(a, b, s)
+ }
+ case *api.Namespace:
+ switch b := objB.(type) {
+ case *v1.Namespace:
+ return true, v1.Convert_api_Namespace_To_v1_Namespace(a, b, s)
+ }
+
+ case *v1.Service:
+ switch b := objB.(type) {
+ case *api.Service:
+ return true, v1.Convert_v1_Service_To_api_Service(a, b, s)
+ }
+ case *api.Service:
+ switch b := objB.(type) {
+ case *v1.Service:
+ return true, v1.Convert_api_Service_To_v1_Service(a, b, s)
+ }
+
+ case *v1.Endpoints:
+ switch b := objB.(type) {
+ case *api.Endpoints:
+ return true, v1.Convert_v1_Endpoints_To_api_Endpoints(a, b, s)
+ }
+ case *api.Endpoints:
+ switch b := objB.(type) {
+ case *v1.Endpoints:
+ return true, v1.Convert_api_Endpoints_To_v1_Endpoints(a, b, s)
+ }
+
+ case *versioned.Event:
+ switch b := objB.(type) {
+ case *versioned.InternalEvent:
+ return true, versioned.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s)
+ }
+ case *versioned.InternalEvent:
+ switch b := objB.(type) {
+ case *versioned.Event:
+ return true, versioned.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s)
+ }
+ }
+ return false, nil
+ })
+}
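A hedged usage sketch, not part of the patch: consumers normally import this package solely for its init() side effect, so that the v1 group is registered before any mapping or codec work. The main package below is hypothetical; it only relies on api.RESTMapper and unversioned.GroupVersionResource, which appear elsewhere in this vendor tree.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	_ "k8s.io/kubernetes/pkg/api/install" // blank import: runs the init() above
	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// After the blank import, the RESTMapper registered by enableVersions is usable.
	gvk, err := api.RESTMapper.KindFor(unversioned.GroupVersionResource{Resource: "pods"})
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("resource %q maps to kind %s\n", "pods", gvk.Kind)
}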
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/mapper.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/mapper.go
new file mode 100644
index 0000000..60addca
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/mapper.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+var RESTMapper meta.RESTMapper
+
+func init() {
+ RESTMapper = meta.MultiRESTMapper{}
+}
+
+func RegisterRESTMapper(m meta.RESTMapper) {
+ RESTMapper = append(RESTMapper.(meta.MultiRESTMapper), m)
+}
+
+func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, interfacesFunc meta.VersionInterfacesFunc,
+ importPathPrefix string, ignoredKinds, rootScoped sets.String) *meta.DefaultRESTMapper {
+
+ mapper := meta.NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc)
+ // enumerate all supported versions, get the kinds, and register with the mapper how to address
+ // our resources.
+ for _, gv := range defaultGroupVersions {
+ for kind, oType := range Scheme.KnownTypes(gv) {
+ gvk := gv.WithKind(kind)
+ // TODO: Remove import path check.
+ // We check the import path because we currently stuff both "api" and "extensions" objects
+ // into the same group within Scheme since Scheme has no notion of groups yet.
+ if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) {
+ continue
+ }
+ scope := meta.RESTScopeNamespace
+ if rootScoped.Has(kind) {
+ scope = meta.RESTScopeRoot
+ }
+ mapper.Add(gvk, scope)
+ }
+ }
+ return mapper
+}
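An illustrative sketch, not part of the patch, of how another API group's install code might plug its own mapper into the aggregate RESTMapper using the two functions above. Placed alongside mapper.go in package api, it needs no imports beyond those already present; the group name, import path, and kind names are placeholders, and interfacesFunc stands for that group's own meta.VersionInterfacesFunc.

// registerExampleMapper is a hypothetical registration for a group "example",
// mirroring what install.go does for the core group.
func registerExampleMapper(interfacesFunc meta.VersionInterfacesFunc) {
	gv := unversioned.GroupVersion{Group: "example", Version: "v1"}
	ignored := sets.NewString("ListOptions", "DeleteOptions", "Status")
	rootScoped := sets.NewString("ClusterWidget") // placeholder root-scoped kind

	mapper := NewDefaultRESTMapper(
		[]unversioned.GroupVersion{gv},
		interfacesFunc,
		"k8s.io/kubernetes/pkg/apis/example", // placeholder import path prefix
		ignored,
		rootScoped,
	)
	RegisterRESTMapper(mapper)
}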
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta.go
new file mode 100644
index 0000000..7779359
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/meta/metatypes"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util"
+)
+
+// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta.
+func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) {
+ meta.CreationTimestamp = unversioned.Now()
+ meta.UID = util.NewUUID()
+ meta.SelfLink = ""
+}
+
+// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values.
+func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool {
+ return !meta.CreationTimestamp.Time.IsZero() ||
+ len(meta.UID) != 0
+}
+
+// ObjectMetaFor returns a pointer to a provided object's ObjectMeta.
+// TODO: allow runtime.Unknown to extract this object
+// TODO: Remove this function and use meta.Accessor() instead.
+func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) {
+ v, err := conversion.EnforcePtr(obj)
+ if err != nil {
+ return nil, err
+ }
+ var meta *ObjectMeta
+ err = runtime.FieldPtr(v, "ObjectMeta", &meta)
+ return meta, err
+}
+
+// ListMetaFor returns a pointer to a provided object's ListMeta,
+// or an error if the object does not have that pointer.
+// TODO: allow runtime.Unknown to extract this object
+func ListMetaFor(obj runtime.Object) (*unversioned.ListMeta, error) {
+ v, err := conversion.EnforcePtr(obj)
+ if err != nil {
+ return nil, err
+ }
+ var meta *unversioned.ListMeta
+ err = runtime.FieldPtr(v, "ListMeta", &meta)
+ return meta, err
+}
+
+func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj }
+
+func (obj *ObjectReference) GetObjectKind() unversioned.ObjectKind { return obj }
+
+// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows
+// fast, direct access to metadata fields for API objects.
+func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace }
+func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace }
+func (meta *ObjectMeta) GetName() string { return meta.Name }
+func (meta *ObjectMeta) SetName(name string) { meta.Name = name }
+func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName }
+func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
+func (meta *ObjectMeta) GetUID() types.UID { return meta.UID }
+func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid }
+func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion }
+func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
+func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink }
+func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
+func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp }
+func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) {
+ meta.CreationTimestamp = creationTimestamp
+}
+func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp }
+func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) {
+ meta.DeletionTimestamp = deletionTimestamp
+}
+func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels }
+func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
+func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }
+func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
+func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers }
+func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers }
+
+func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference {
+ ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences))
+ for i := 0; i < len(meta.OwnerReferences); i++ {
+ ret[i].Kind = meta.OwnerReferences[i].Kind
+ ret[i].Name = meta.OwnerReferences[i].Name
+ ret[i].UID = meta.OwnerReferences[i].UID
+ ret[i].APIVersion = meta.OwnerReferences[i].APIVersion
+ if meta.OwnerReferences[i].Controller != nil {
+ value := *meta.OwnerReferences[i].Controller
+ ret[i].Controller = &value
+ }
+ }
+ return ret
+}
+
+func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) {
+ newReferences := make([]OwnerReference, len(references))
+ for i := 0; i < len(references); i++ {
+ newReferences[i].Kind = references[i].Kind
+ newReferences[i].Name = references[i].Name
+ newReferences[i].UID = references[i].UID
+ newReferences[i].APIVersion = references[i].APIVersion
+ if references[i].Controller != nil {
+ value := *references[i].Controller
+ newReferences[i].Controller = &value
+ }
+ }
+ meta.OwnerReferences = newReferences
+}
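A short sketch, not from the patch, showing that these methods let any type embedding ObjectMeta be handled through the generic meta.Object interface defined in pkg/api/meta. The helper function is hypothetical, and Pod is assumed to be the internal api.Pod type from this vendored tree, which embeds ObjectMeta.

// labelOf is a hypothetical helper demonstrating generic access through meta.Object.
func labelOf(pod *Pod, key string) string {
	var obj meta.Object = &pod.ObjectMeta // ObjectMeta satisfies meta.Object via the methods above
	labels := obj.GetLabels()
	if labels == nil {
		return ""
	}
	return labels[key]
}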
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go
new file mode 100644
index 0000000..a3b18a5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package meta provides functions for retrieving API metadata from objects
+// belonging to the Kubernetes API
+package meta
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go
new file mode 100644
index 0000000..dc4ec07
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource
+type AmbiguousResourceError struct {
+ PartialResource unversioned.GroupVersionResource
+
+ MatchingResources []unversioned.GroupVersionResource
+ MatchingKinds []unversioned.GroupVersionKind
+}
+
+func (e *AmbiguousResourceError) Error() string {
+ switch {
+ case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+ return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds)
+ case len(e.MatchingKinds) > 0:
+ return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds)
+ case len(e.MatchingResources) > 0:
+ return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources)
+
+ }
+
+ return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource)
+}
+
+func IsAmbiguousResourceError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ _, ok := err.(*AmbiguousResourceError)
+ return ok
+}
+
+// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource
+type NoResourceMatchError struct {
+ PartialResource unversioned.GroupVersionResource
+}
+
+func (e *NoResourceMatchError) Error() string {
+ return fmt.Sprintf("no matches for %v", e.PartialResource)
+}
+
+func IsNoResourceMatchError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ _, ok := err.(*NoResourceMatchError)
+ return ok
+}
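A hedged sketch of how callers usually branch on these error types; the mapper variable and the bare resource name are placeholders, and imports of fmt, meta, and unversioned are assumed.

gvk, err := mapper.KindFor(unversioned.GroupVersionResource{Resource: "widgets"})
switch {
case err == nil:
	fmt.Println("resolved:", gvk)
case meta.IsNoResourceMatchError(err):
	fmt.Println("no registered resource matches 'widgets'")
case meta.IsAmbiguousResourceError(err):
	fmt.Println("'widgets' matches several resources; qualify it with a group/version")
default:
	fmt.Println("lookup failed:", err)
}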
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/help.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/help.go
new file mode 100644
index 0000000..0d733a5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/help.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// IsListType returns true if the provided Object has a slice called Items
+func IsListType(obj runtime.Object) bool {
+ _, err := GetItemsPtr(obj)
+ return err == nil
+}
+
+// GetItemsPtr returns a pointer to the list object's Items member.
+// If 'list' doesn't have an Items member, it's not really a list type
+// and an error will be returned.
+// This function will either return a pointer to a slice, or an error, but not both.
+func GetItemsPtr(list runtime.Object) (interface{}, error) {
+ v, err := conversion.EnforcePtr(list)
+ if err != nil {
+ return nil, err
+ }
+ items := v.FieldByName("Items")
+ if !items.IsValid() {
+ return nil, fmt.Errorf("no Items field in %#v", list)
+ }
+ switch items.Kind() {
+ case reflect.Interface, reflect.Ptr:
+ target := reflect.TypeOf(items.Interface()).Elem()
+ if target.Kind() != reflect.Slice {
+ return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind())
+ }
+ return items.Interface(), nil
+ case reflect.Slice:
+ return items.Addr().Interface(), nil
+ default:
+ return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind())
+ }
+}
+
+// ExtractList returns obj's Items element as an array of runtime.Objects.
+// Returns an error if obj is not a List type (does not have an Items member).
+func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
+ itemsPtr, err := GetItemsPtr(obj)
+ if err != nil {
+ return nil, err
+ }
+ items, err := conversion.EnforcePtr(itemsPtr)
+ if err != nil {
+ return nil, err
+ }
+ list := make([]runtime.Object, items.Len())
+ for i := range list {
+ raw := items.Index(i)
+ switch item := raw.Interface().(type) {
+ case runtime.RawExtension:
+ switch {
+ case item.Object != nil:
+ list[i] = item.Object
+ case item.Raw != nil:
+ // TODO: Set ContentEncoding and ContentType correctly.
+ list[i] = &runtime.Unknown{Raw: item.Raw}
+ default:
+ list[i] = nil
+ }
+ case runtime.Object:
+ list[i] = item
+ default:
+ var found bool
+ if list[i], found = raw.Addr().Interface().(runtime.Object); !found {
+ return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+ }
+ }
+ }
+ return list, nil
+}
+
+// objectSliceType is the type of a slice of Objects
+var objectSliceType = reflect.TypeOf([]runtime.Object{})
+
+// SetList sets the given list object's Items member to have the elements given in
+// objects.
+// Returns an error if list is not a List type (does not have an Items member),
+// or if any of the objects are not of the right type.
+func SetList(list runtime.Object, objects []runtime.Object) error {
+ itemsPtr, err := GetItemsPtr(list)
+ if err != nil {
+ return err
+ }
+ items, err := conversion.EnforcePtr(itemsPtr)
+ if err != nil {
+ return err
+ }
+ if items.Type() == objectSliceType {
+ items.Set(reflect.ValueOf(objects))
+ return nil
+ }
+ slice := reflect.MakeSlice(items.Type(), len(objects), len(objects))
+ for i := range objects {
+ dest := slice.Index(i)
+ src, err := conversion.EnforcePtr(objects[i])
+ if err != nil {
+ return err
+ }
+ if src.Type().AssignableTo(dest.Type()) {
+ dest.Set(src)
+ } else if src.Type().ConvertibleTo(dest.Type()) {
+ dest.Set(src.Convert(dest.Type()))
+ } else {
+ return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type())
+ }
+ }
+ items.Set(slice)
+ return nil
+}
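A minimal sketch, not part of the patch, of an ExtractList/SetList round trip. It assumes the vendored api.PodList type, whose Items field is a []api.Pod, and leaves the filter predicate up to the caller.

// filterPods is a hypothetical helper that keeps only the items accepted by keep.
func filterPods(list *api.PodList, keep func(runtime.Object) bool) error {
	objs, err := meta.ExtractList(list) // one runtime.Object per element of list.Items
	if err != nil {
		return err
	}
	kept := make([]runtime.Object, 0, len(objs))
	for _, o := range objs {
		if keep(o) {
			kept = append(kept, o)
		}
	}
	return meta.SetList(list, kept) // writes the survivors back into list.Items
}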
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go
new file mode 100644
index 0000000..34c51e3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "k8s.io/kubernetes/pkg/api/meta/metatypes"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+)
+
+// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version.
+type VersionInterfaces struct {
+ runtime.ObjectConvertor
+ MetadataAccessor
+}
+
+type ObjectMetaAccessor interface {
+ GetObjectMeta() Object
+}
+
+// Object lets you work with object metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+type Object interface {
+ GetNamespace() string
+ SetNamespace(namespace string)
+ GetName() string
+ SetName(name string)
+ GetGenerateName() string
+ SetGenerateName(name string)
+ GetUID() types.UID
+ SetUID(uid types.UID)
+ GetResourceVersion() string
+ SetResourceVersion(version string)
+ GetSelfLink() string
+ SetSelfLink(selfLink string)
+ GetCreationTimestamp() unversioned.Time
+ SetCreationTimestamp(timestamp unversioned.Time)
+ GetDeletionTimestamp() *unversioned.Time
+ SetDeletionTimestamp(timestamp *unversioned.Time)
+ GetLabels() map[string]string
+ SetLabels(labels map[string]string)
+ GetAnnotations() map[string]string
+ SetAnnotations(annotations map[string]string)
+ GetFinalizers() []string
+ SetFinalizers(finalizers []string)
+ GetOwnerReferences() []metatypes.OwnerReference
+ SetOwnerReferences([]metatypes.OwnerReference)
+}
+
+var _ Object = &runtime.Unstructured{}
+
+type ListMetaAccessor interface {
+ GetListMeta() List
+}
+
+// List lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+type List unversioned.List
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+type Type unversioned.Type
+
+// MetadataAccessor lets you work with object and list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+//
+// MetadataAccessor exposes Interface in a way that can be used with multiple objects.
+type MetadataAccessor interface {
+ APIVersion(obj runtime.Object) (string, error)
+ SetAPIVersion(obj runtime.Object, version string) error
+
+ Kind(obj runtime.Object) (string, error)
+ SetKind(obj runtime.Object, kind string) error
+
+ Namespace(obj runtime.Object) (string, error)
+ SetNamespace(obj runtime.Object, namespace string) error
+
+ Name(obj runtime.Object) (string, error)
+ SetName(obj runtime.Object, name string) error
+
+ GenerateName(obj runtime.Object) (string, error)
+ SetGenerateName(obj runtime.Object, name string) error
+
+ UID(obj runtime.Object) (types.UID, error)
+ SetUID(obj runtime.Object, uid types.UID) error
+
+ SelfLink(obj runtime.Object) (string, error)
+ SetSelfLink(obj runtime.Object, selfLink string) error
+
+ Labels(obj runtime.Object) (map[string]string, error)
+ SetLabels(obj runtime.Object, labels map[string]string) error
+
+ Annotations(obj runtime.Object) (map[string]string, error)
+ SetAnnotations(obj runtime.Object, annotations map[string]string) error
+
+ runtime.ResourceVersioner
+}
+
+type RESTScopeName string
+
+const (
+ RESTScopeNameNamespace RESTScopeName = "namespace"
+ RESTScopeNameRoot RESTScopeName = "root"
+)
+
+// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy
+type RESTScope interface {
+ // Name of the scope
+ Name() RESTScopeName
+ // ParamName is the optional name of the parameter that should be inserted in the resource url
+ // If empty, no param will be inserted
+ ParamName() string
+ // ArgumentName is the optional name that should be used for the variable holding the value.
+ ArgumentName() string
+ // ParamDescription is the optional description to use to document the parameter in api documentation
+ ParamDescription() string
+}
+
+// RESTMapping contains the information needed to deal with objects of a specific
+// resource and kind in a RESTful manner.
+type RESTMapping struct {
+ // Resource is a string representing the name of this resource as a REST client would see it
+ Resource string
+
+ GroupVersionKind unversioned.GroupVersionKind
+
+ // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy
+ Scope RESTScope
+
+ runtime.ObjectConvertor
+ MetadataAccessor
+}
+
+// RESTMapper allows clients to map resources to kind, and map kind and version
+// to interfaces for manipulating those objects. It is primarily intended for
+// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md.
+//
+// The Kubernetes API provides versioned resources and object kinds which are scoped
+// to API groups. In other words, kinds and resources should not be assumed to be
+// unique across groups.
+//
+// TODO(caesarxuchao): Add proper multi-group support so that kinds & resources are
+// scoped to groups. See http://issues.k8s.io/12413 and http://issues.k8s.io/10009.
+type RESTMapper interface {
+ // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches.
+ KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error)
+
+ // KindsFor takes a partial resource and returns the list of potential kinds in priority order.
+ KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error)
+
+ // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches.
+ ResourceFor(input unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error)
+
+ // ResourcesFor takes a partial resource and returns the list of potential resources in priority order.
+ ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error)
+
+ RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error)
+
+ AliasesForResource(resource string) ([]string, bool)
+ ResourceSingularizer(resource string) (singular string, err error)
+}
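An illustrative sketch, not part of the patch, of consuming a RESTMapping obtained from a RESTMapper. The URL construction is deliberately simplified, and the mapper variable and "default" namespace are placeholders.

mapping, err := mapper.RESTMapping(unversioned.GroupKind{Kind: "Pod"}, "v1")
if err != nil {
	return err
}
path := "/api/v1"
if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
	// e.g. /api/v1/namespaces/default/pods
	path += "/" + mapping.Scope.ParamName() + "/default"
}
path += "/" + mapping.Resource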
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go
new file mode 100644
index 0000000..876aa4f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go
@@ -0,0 +1,567 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api/meta/metatypes"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+
+ "github.com/golang/glog"
+)
+
+// errNotList is returned when an object implements the Object style interfaces but not the List style
+// interfaces.
+var errNotList = fmt.Errorf("object does not implement the List interfaces")
+
+// ListAccessor returns a List interface for the provided object or an error if the object does
+// not provide List.
+// IMPORTANT: Objects are a superset of lists, so all Objects return List metadata. Do not use this
+// check to determine whether an object *is* a List.
+// TODO: return bool instead of error
+func ListAccessor(obj interface{}) (List, error) {
+ switch t := obj.(type) {
+ case List:
+ return t, nil
+ case unversioned.List:
+ return t, nil
+ case ListMetaAccessor:
+ if m := t.GetListMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotList
+ case unversioned.ListMetaAccessor:
+ if m := t.GetListMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotList
+ case Object:
+ return t, nil
+ case ObjectMetaAccessor:
+ if m := t.GetObjectMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotList
+ default:
+ return nil, errNotList
+ }
+}
+
+// errNotObject is returned when an object implements the List style interfaces but not the Object style
+// interfaces.
+var errNotObject = fmt.Errorf("object does not implement the Object interfaces")
+
+// Accessor takes an arbitrary object pointer and returns its meta.Object accessor.
+// obj must be a pointer to an API type. An error is returned if the minimum
+// required fields are missing. Fields that are not required return the default
+// value and are a no-op if set.
+// TODO: return bool instead of error
+func Accessor(obj interface{}) (Object, error) {
+ switch t := obj.(type) {
+ case Object:
+ return t, nil
+ case ObjectMetaAccessor:
+ if m := t.GetObjectMeta(); m != nil {
+ return m, nil
+ }
+ return nil, errNotObject
+ case List, unversioned.List, ListMetaAccessor, unversioned.ListMetaAccessor:
+ return nil, errNotObject
+ default:
+ return nil, errNotObject
+ }
+}
+
+// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion
+// and Kind of an in-memory internal object.
+// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta
+// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube
+// api conventions).
+func TypeAccessor(obj interface{}) (Type, error) {
+ if typed, ok := obj.(runtime.Object); ok {
+ return objectAccessor{typed}, nil
+ }
+ v, err := conversion.EnforcePtr(obj)
+ if err != nil {
+ return nil, err
+ }
+ t := v.Type()
+ if v.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface())
+ }
+
+ typeMeta := v.FieldByName("TypeMeta")
+ if !typeMeta.IsValid() {
+ return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t)
+ }
+ a := &genericAccessor{}
+ if err := extractFromTypeMeta(typeMeta, a); err != nil {
+ return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err)
+ }
+ return a, nil
+}
+
+type objectAccessor struct {
+ runtime.Object
+}
+
+func (obj objectAccessor) GetKind() string {
+ return obj.GetObjectKind().GroupVersionKind().Kind
+}
+
+func (obj objectAccessor) SetKind(kind string) {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ gvk.Kind = kind
+ obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+func (obj objectAccessor) GetAPIVersion() string {
+ return obj.GetObjectKind().GroupVersionKind().GroupVersion().String()
+}
+
+func (obj objectAccessor) SetAPIVersion(version string) {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ gv, err := unversioned.ParseGroupVersion(version)
+ if err != nil {
+ gv = unversioned.GroupVersion{Version: version}
+ }
+ gvk.Group, gvk.Version = gv.Group, gv.Version
+ obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+// NewAccessor returns a MetadataAccessor that can retrieve
+// or manipulate resource version on objects derived from core API
+// metadata concepts.
+func NewAccessor() MetadataAccessor {
+ return resourceAccessor{}
+}
+
+// resourceAccessor implements ResourceVersioner and SelfLinker.
+type resourceAccessor struct{}
+
+func (resourceAccessor) Kind(obj runtime.Object) (string, error) {
+ return objectAccessor{obj}.GetKind(), nil
+}
+
+func (resourceAccessor) SetKind(obj runtime.Object, kind string) error {
+ objectAccessor{obj}.SetKind(kind)
+ return nil
+}
+
+func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) {
+ return objectAccessor{obj}.GetAPIVersion(), nil
+}
+
+func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error {
+ objectAccessor{obj}.SetAPIVersion(version)
+ return nil
+}
+
+func (resourceAccessor) Namespace(obj runtime.Object) (string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetNamespace(), nil
+}
+
+func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetNamespace(namespace)
+ return nil
+}
+
+func (resourceAccessor) Name(obj runtime.Object) (string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetName(), nil
+}
+
+func (resourceAccessor) SetName(obj runtime.Object, name string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetName(name)
+ return nil
+}
+
+func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetGenerateName(), nil
+}
+
+func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetGenerateName(name)
+ return nil
+}
+
+func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetUID(), nil
+}
+
+func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetUID(uid)
+ return nil
+}
+
+func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) {
+ accessor, err := ListAccessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetSelfLink(), nil
+}
+
+func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error {
+ accessor, err := ListAccessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetSelfLink(selfLink)
+ return nil
+}
+
+func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ return accessor.GetLabels(), nil
+}
+
+func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetLabels(labels)
+ return nil
+}
+
+func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ return accessor.GetAnnotations(), nil
+}
+
+func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error {
+ accessor, err := Accessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetAnnotations(annotations)
+ return nil
+}
+
+func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) {
+ accessor, err := ListAccessor(obj)
+ if err != nil {
+ return "", err
+ }
+ return accessor.GetResourceVersion(), nil
+}
+
+func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error {
+ accessor, err := ListAccessor(obj)
+ if err != nil {
+ return err
+ }
+ accessor.SetResourceVersion(version)
+ return nil
+}
+
+// extractFromOwnerReference extracts v into o. v is a single element of an object's OwnerReferences field.
+func extractFromOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error {
+ if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil {
+ return err
+ }
+ if err := runtime.Field(v, "Kind", &o.Kind); err != nil {
+ return err
+ }
+ if err := runtime.Field(v, "Name", &o.Name); err != nil {
+ return err
+ }
+ if err := runtime.Field(v, "UID", &o.UID); err != nil {
+ return err
+ }
+ var controllerPtr *bool
+ if err := runtime.Field(v, "Controller", &controllerPtr); err != nil {
+ return err
+ }
+ if controllerPtr != nil {
+ controller := *controllerPtr
+ o.Controller = &controller
+ }
+ return nil
+}
+
+// setOwnerReference copies o into v. v is a single element of an object's OwnerReferences field.
+func setOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error {
+ if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil {
+ return err
+ }
+ if err := runtime.SetField(o.Kind, v, "Kind"); err != nil {
+ return err
+ }
+ if err := runtime.SetField(o.Name, v, "Name"); err != nil {
+ return err
+ }
+ if err := runtime.SetField(o.UID, v, "UID"); err != nil {
+ return err
+ }
+ if o.Controller != nil {
+ controller := *(o.Controller)
+ if err := runtime.SetField(&controller, v, "Controller"); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// genericAccessor contains pointers to strings that can modify an arbitrary
+// struct and implements the Accessor interface.
+type genericAccessor struct {
+ namespace *string
+ name *string
+ generateName *string
+ uid *types.UID
+ apiVersion *string
+ kind *string
+ resourceVersion *string
+ selfLink *string
+ creationTimestamp *unversioned.Time
+ deletionTimestamp **unversioned.Time
+ labels *map[string]string
+ annotations *map[string]string
+ ownerReferences reflect.Value
+ finalizers *[]string
+}
+
+func (a genericAccessor) GetNamespace() string {
+ if a.namespace == nil {
+ return ""
+ }
+ return *a.namespace
+}
+
+func (a genericAccessor) SetNamespace(namespace string) {
+ if a.namespace == nil {
+ return
+ }
+ *a.namespace = namespace
+}
+
+func (a genericAccessor) GetName() string {
+ if a.name == nil {
+ return ""
+ }
+ return *a.name
+}
+
+func (a genericAccessor) SetName(name string) {
+ if a.name == nil {
+ return
+ }
+ *a.name = name
+}
+
+func (a genericAccessor) GetGenerateName() string {
+ if a.generateName == nil {
+ return ""
+ }
+ return *a.generateName
+}
+
+func (a genericAccessor) SetGenerateName(generateName string) {
+ if a.generateName == nil {
+ return
+ }
+ *a.generateName = generateName
+}
+
+func (a genericAccessor) GetUID() types.UID {
+ if a.uid == nil {
+ return ""
+ }
+ return *a.uid
+}
+
+func (a genericAccessor) SetUID(uid types.UID) {
+ if a.uid == nil {
+ return
+ }
+ *a.uid = uid
+}
+
+func (a genericAccessor) GetAPIVersion() string {
+ return *a.apiVersion
+}
+
+func (a genericAccessor) SetAPIVersion(version string) {
+ *a.apiVersion = version
+}
+
+func (a genericAccessor) GetKind() string {
+ return *a.kind
+}
+
+func (a genericAccessor) SetKind(kind string) {
+ *a.kind = kind
+}
+
+func (a genericAccessor) GetResourceVersion() string {
+ return *a.resourceVersion
+}
+
+func (a genericAccessor) SetResourceVersion(version string) {
+ *a.resourceVersion = version
+}
+
+func (a genericAccessor) GetSelfLink() string {
+ return *a.selfLink
+}
+
+func (a genericAccessor) SetSelfLink(selfLink string) {
+ *a.selfLink = selfLink
+}
+
+func (a genericAccessor) GetCreationTimestamp() unversioned.Time {
+ return *a.creationTimestamp
+}
+
+func (a genericAccessor) SetCreationTimestamp(timestamp unversioned.Time) {
+ *a.creationTimestamp = timestamp
+}
+
+func (a genericAccessor) GetDeletionTimestamp() *unversioned.Time {
+ return *a.deletionTimestamp
+}
+
+func (a genericAccessor) SetDeletionTimestamp(timestamp *unversioned.Time) {
+ *a.deletionTimestamp = timestamp
+}
+
+func (a genericAccessor) GetLabels() map[string]string {
+ if a.labels == nil {
+ return nil
+ }
+ return *a.labels
+}
+
+func (a genericAccessor) SetLabels(labels map[string]string) {
+ *a.labels = labels
+}
+
+func (a genericAccessor) GetAnnotations() map[string]string {
+ if a.annotations == nil {
+ return nil
+ }
+ return *a.annotations
+}
+
+func (a genericAccessor) SetAnnotations(annotations map[string]string) {
+ if a.annotations == nil {
+ emptyAnnotations := make(map[string]string)
+ a.annotations = &emptyAnnotations
+ }
+ *a.annotations = annotations
+}
+
+func (a genericAccessor) GetFinalizers() []string {
+ if a.finalizers == nil {
+ return nil
+ }
+ return *a.finalizers
+}
+
+func (a genericAccessor) SetFinalizers(finalizers []string) {
+ *a.finalizers = finalizers
+}
+
+func (a genericAccessor) GetOwnerReferences() []metatypes.OwnerReference {
+ var ret []metatypes.OwnerReference
+ s := a.ownerReferences
+ if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+ glog.Errorf("expect %v to be a pointer to slice", s)
+ return ret
+ }
+ s = s.Elem()
+ // Set the capacity to one element greater to avoid a copy if the caller later appends an element.
+ ret = make([]metatypes.OwnerReference, s.Len(), s.Len()+1)
+ for i := 0; i < s.Len(); i++ {
+ if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
+ glog.Errorf("extractFromOwnerReference failed: %v", err)
+ return ret
+ }
+ }
+ return ret
+}
+
+func (a genericAccessor) SetOwnerReferences(references []metatypes.OwnerReference) {
+ s := a.ownerReferences
+ if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+ glog.Errorf("expect %v to be a pointer to slice", s)
+ }
+ s = s.Elem()
+ newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
+ for i := 0; i < len(references); i++ {
+ if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
+ glog.Errorf("setOwnerReference failed: %v", err)
+ return
+ }
+ }
+ s.Set(newReferences)
+}
+
+// extractFromTypeMeta extracts pointers to version and kind fields from an object
+func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
+ if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
+ return err
+ }
+ if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil {
+ return err
+ }
+ return nil
+}
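A short hedged sketch of the typical Accessor pattern; obj stands for any pointer to an API type from this tree (for example a *api.Pod), and the annotation key is made up.

// stamp is a hypothetical helper that tags any API object with an annotation.
func stamp(obj runtime.Object) error {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}
	annotations := accessor.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations["example.io/touched"] = "true" // hypothetical annotation key
	accessor.SetAnnotations(annotations)
	return nil
}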
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go
new file mode 100644
index 0000000..41e6596
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// The types defined in this package are used by the meta package to represent
+// the in-memory version of objects. We cannot reuse the __internal version of
+// API objects because it causes an import cycle.
+package metatypes
+
+import "k8s.io/kubernetes/pkg/types"
+
+type OwnerReference struct {
+ APIVersion string
+ Kind string
+ UID types.UID
+ Name string
+ Controller *bool
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go
new file mode 100644
index 0000000..790795a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go
@@ -0,0 +1,200 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ utilerrors "k8s.io/kubernetes/pkg/util/errors"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+// MultiRESTMapper is a wrapper for multiple RESTMappers.
+type MultiRESTMapper []RESTMapper
+
+func (m MultiRESTMapper) String() string {
+ nested := []string{}
+ for _, t := range m {
+ currString := fmt.Sprintf("%v", t)
+ splitStrings := strings.Split(currString, "\n")
+ nested = append(nested, strings.Join(splitStrings, "\n\t"))
+ }
+
+ return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t"))
+}
+
+// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod)
+// This implementation supports multiple REST schemas and returns the first match.
+func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+ for _, t := range m {
+ singular, err = t.ResourceSingularizer(resource)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+func (m MultiRESTMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) {
+ allGVRs := []unversioned.GroupVersionResource{}
+ for _, t := range m {
+ gvrs, err := t.ResourcesFor(resource)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoResourceMatchError(err) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // walk the existing values to de-dup
+ for _, curr := range gvrs {
+ found := false
+ for _, existing := range allGVRs {
+ if curr == existing {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ allGVRs = append(allGVRs, curr)
+ }
+ }
+ }
+
+ if len(allGVRs) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: resource}
+ }
+
+ return allGVRs, nil
+}
+
+func (m MultiRESTMapper) KindsFor(resource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) {
+ allGVKs := []unversioned.GroupVersionKind{}
+ for _, t := range m {
+ gvks, err := t.KindsFor(resource)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoResourceMatchError(err) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // walk the existing values to de-dup
+ for _, curr := range gvks {
+ found := false
+ for _, existing := range allGVKs {
+ if curr == existing {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ allGVKs = append(allGVKs, curr)
+ }
+ }
+ }
+
+ if len(allGVKs) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: resource}
+ }
+
+ return allGVKs, nil
+}
+
+func (m MultiRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) {
+ resources, err := m.ResourcesFor(resource)
+ if err != nil {
+ return unversioned.GroupVersionResource{}, err
+ }
+ if len(resources) == 1 {
+ return resources[0], nil
+ }
+
+ return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m MultiRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) {
+ kinds, err := m.KindsFor(resource)
+ if err != nil {
+ return unversioned.GroupVersionKind{}, err
+ }
+ if len(kinds) == 1 {
+ return kinds[0], nil
+ }
+
+ return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m MultiRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) {
+ allMappings := []*RESTMapping{}
+ errors := []error{}
+
+ for _, t := range m {
+ currMapping, err := t.RESTMapping(gk, versions...)
+ // ignore "no match" errors, but any other error percolates back up
+ if IsNoResourceMatchError(err) {
+ continue
+ }
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+
+ allMappings = append(allMappings, currMapping)
+ }
+
+ // if we got exactly one mapping, then use it even if other requested failed
+ if len(allMappings) == 1 {
+ return allMappings[0], nil
+ }
+ if len(allMappings) > 1 {
+ return nil, fmt.Errorf("multiple matches found for %v in %v", gk, versions)
+ }
+ if len(errors) > 0 {
+ return nil, utilerrors.NewAggregate(errors)
+ }
+ return nil, fmt.Errorf("no match found for %v in %v", gk, versions)
+}
+
+// AliasesForResource merges the alias responses from all the wrapped mappers, de-duplicating entries.
+func (m MultiRESTMapper) AliasesForResource(alias string) ([]string, bool) {
+ seenAliases := sets.NewString()
+ allAliases := []string{}
+ handled := false
+
+ for _, t := range m {
+ if currAliases, currOk := t.AliasesForResource(alias); currOk {
+ for _, currAlias := range currAliases {
+ if !seenAliases.Has(currAlias) {
+ allAliases = append(allAliases, currAlias)
+ seenAliases.Insert(currAlias)
+ }
+ }
+ handled = true
+ }
+ }
+ return allAliases, handled
+}
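A hedged composition sketch; coreMapper and extensionsMapper are placeholder RESTMapper values (for example, two DefaultRESTMapper instances registered by different groups), and imports of fmt, meta, and unversioned are assumed.

combined := meta.MultiRESTMapper{coreMapper, extensionsMapper}

// KindsFor de-duplicates candidates across the wrapped mappers.
gvks, err := combined.KindsFor(unversioned.GroupVersionResource{Resource: "events"})
if err != nil {
	return err
}
for _, gvk := range gvks {
	fmt.Println("candidate:", gvk)
}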
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go
new file mode 100644
index 0000000..1e44e45
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+const (
+ AnyGroup = "*"
+ AnyVersion = "*"
+ AnyResource = "*"
+ AnyKind = "*"
+)
+
+// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind
+// when multiple matches are possible
+type PriorityRESTMapper struct {
+ // Delegate is the RESTMapper to use to locate all the Kind and Resource matches
+ Delegate RESTMapper
+
+ // ResourcePriority is a list of priority patterns to apply to matching resources.
+ // The list of all matching resources is narrowed based on the patterns until only one remains.
+ // A pattern with no matches is skipped. A pattern with more than one match uses its
+ // matches as the list to continue matching against.
+ ResourcePriority []unversioned.GroupVersionResource
+
+ // KindPriority is a list of priority patterns to apply to matching kinds.
+ // The list of all matching kinds is narrowed based on the patterns until only one remains.
+ // A pattern with no matches is skipped. A pattern with more than one match uses its
+ // matches as the list to continue matching against.
+ KindPriority []unversioned.GroupVersionKind
+}
+
+func (m PriorityRESTMapper) String() string {
+ return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate)
+}
+
+// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) {
+ originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource)
+ if err != nil {
+ return unversioned.GroupVersionResource{}, err
+ }
+ if len(originalGVRs) == 1 {
+ return originalGVRs[0], nil
+ }
+
+ remainingGVRs := append([]unversioned.GroupVersionResource{}, originalGVRs...)
+ for _, pattern := range m.ResourcePriority {
+ matchedGVRs := []unversioned.GroupVersionResource{}
+ for _, gvr := range remainingGVRs {
+ if resourceMatches(pattern, gvr) {
+ matchedGVRs = append(matchedGVRs, gvr)
+ }
+ }
+
+ switch len(matchedGVRs) {
+ case 0:
+ // nothing matched this pattern, so move on to the next one
+ continue
+ case 1:
+ // one match, return
+ return matchedGVRs[0], nil
+ default:
+ // more than one match, use the matched hits as the list moving to the next pattern.
+ // this way you can have a series of selection criteria
+ remainingGVRs = matchedGVRs
+ }
+ }
+
+ return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs}
+}
+
+// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) {
+ originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource)
+ if err != nil {
+ return unversioned.GroupVersionKind{}, err
+ }
+ if len(originalGVKs) == 1 {
+ return originalGVKs[0], nil
+ }
+
+ remainingGVKs := append([]unversioned.GroupVersionKind{}, originalGVKs...)
+ for _, pattern := range m.KindPriority {
+ matchedGVKs := []unversioned.GroupVersionKind{}
+ for _, gvr := range remainingGVKs {
+ if kindMatches(pattern, gvr) {
+ matchedGVKs = append(matchedGVKs, gvr)
+ }
+ }
+
+ switch len(matchedGVKs) {
+ case 0:
+ // nothing matched this pattern, so move on to the next one
+ continue
+ case 1:
+ // one match, return
+ return matchedGVKs[0], nil
+ default:
+ // more than one match, use the matched hits as the list moving to the next pattern.
+ // this way you can have a series of selection criteria
+ remainingGVKs = matchedGVKs
+ }
+ }
+
+ return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs}
+}
+
+func resourceMatches(pattern unversioned.GroupVersionResource, resource unversioned.GroupVersionResource) bool {
+ if pattern.Group != AnyGroup && pattern.Group != resource.Group {
+ return false
+ }
+ if pattern.Version != AnyVersion && pattern.Version != resource.Version {
+ return false
+ }
+ if pattern.Resource != AnyResource && pattern.Resource != resource.Resource {
+ return false
+ }
+
+ return true
+}
+
+func kindMatches(pattern unversioned.GroupVersionKind, kind unversioned.GroupVersionKind) bool {
+ if pattern.Group != AnyGroup && pattern.Group != kind.Group {
+ return false
+ }
+ if pattern.Version != AnyVersion && pattern.Version != kind.Version {
+ return false
+ }
+ if pattern.Kind != AnyKind && pattern.Kind != kind.Kind {
+ return false
+ }
+
+ return true
+}
+
+func (m PriorityRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (mapping *RESTMapping, err error) {
+ return m.Delegate.RESTMapping(gk, versions...)
+}
+
+func (m PriorityRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) {
+ return m.Delegate.AliasesForResource(alias)
+}
+
+func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+ return m.Delegate.ResourceSingularizer(resource)
+}
+
+func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) {
+ return m.Delegate.ResourcesFor(partiallySpecifiedResource)
+}
+
+func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) {
+ return m.Delegate.KindsFor(partiallySpecifiedResource)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go
new file mode 100644
index 0000000..bf2567e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go
@@ -0,0 +1,520 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: move everything in this file to pkg/api/rest
+package meta
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Implements RESTScope interface
+type restScope struct {
+ name RESTScopeName
+ paramName string
+ argumentName string
+ paramDescription string
+}
+
+func (r *restScope) Name() RESTScopeName {
+ return r.name
+}
+func (r *restScope) ParamName() string {
+ return r.paramName
+}
+func (r *restScope) ArgumentName() string {
+ return r.argumentName
+}
+func (r *restScope) ParamDescription() string {
+ return r.paramDescription
+}
+
+var RESTScopeNamespace = &restScope{
+ name: RESTScopeNameNamespace,
+ paramName: "namespaces",
+ argumentName: "namespace",
+ paramDescription: "object name and auth scope, such as for teams and projects",
+}
+
+var RESTScopeRoot = &restScope{
+ name: RESTScopeNameRoot,
+}
+
+// DefaultRESTMapper exposes mappings between the types defined in a
+// runtime.Scheme. It assumes that all types defined in the provided scheme
+// can be mapped with the provided MetadataAccessor and Codec interfaces.
+//
+// The resource name of a Kind is defined as the lowercase,
+// English-plural version of the Kind string.
+// When converting from resource to Kind, the singular version of the
+// resource name is also accepted for convenience.
+//
+// TODO: Only accept plural for some operations for increased control?
+// (`get pod bar` vs `get pods bar`)
+type DefaultRESTMapper struct {
+ defaultGroupVersions []unversioned.GroupVersion
+
+ resourceToKind map[unversioned.GroupVersionResource]unversioned.GroupVersionKind
+ kindToPluralResource map[unversioned.GroupVersionKind]unversioned.GroupVersionResource
+ kindToScope map[unversioned.GroupVersionKind]RESTScope
+ singularToPlural map[unversioned.GroupVersionResource]unversioned.GroupVersionResource
+ pluralToSingular map[unversioned.GroupVersionResource]unversioned.GroupVersionResource
+
+ interfacesFunc VersionInterfacesFunc
+
+ // aliasToResource is used for mapping aliases to resources
+ aliasToResource map[string][]string
+}
+
+func (m *DefaultRESTMapper) String() string {
+ return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource)
+}
+
+var _ RESTMapper = &DefaultRESTMapper{}
+
+// VersionInterfacesFunc returns the appropriate typer and metadata accessor for a
+// given api version, or an error if no such api version exists.
+type VersionInterfacesFunc func(version unversioned.GroupVersion) (*VersionInterfaces, error)
+
+// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion
+// to a resource name and back based on the objects in a runtime.Scheme
+// and the Kubernetes API conventions. Takes a priority list of the group versions
+// to search when an object has no default version (set empty to return an error),
+// and a function that retrieves the correct metadata for a given version.
+func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper {
+ resourceToKind := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionKind)
+ kindToPluralResource := make(map[unversioned.GroupVersionKind]unversioned.GroupVersionResource)
+ kindToScope := make(map[unversioned.GroupVersionKind]RESTScope)
+ singularToPlural := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource)
+ pluralToSingular := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource)
+ aliasToResource := make(map[string][]string)
+ // TODO: verify name mappings work correctly when versions differ
+
+ return &DefaultRESTMapper{
+ resourceToKind: resourceToKind,
+ kindToPluralResource: kindToPluralResource,
+ kindToScope: kindToScope,
+ defaultGroupVersions: defaultGroupVersions,
+ singularToPlural: singularToPlural,
+ pluralToSingular: pluralToSingular,
+ aliasToResource: aliasToResource,
+ interfacesFunc: f,
+ }
+}
+
+func (m *DefaultRESTMapper) Add(kind unversioned.GroupVersionKind, scope RESTScope) {
+ plural, singular := KindToResource(kind)
+
+ m.singularToPlural[singular] = plural
+ m.pluralToSingular[plural] = singular
+
+ m.resourceToKind[singular] = kind
+ m.resourceToKind[plural] = kind
+
+ m.kindToPluralResource[kind] = plural
+ m.kindToScope[kind] = scope
+}
+
+// unpluralizedSuffixes is a list of resource suffixes that are the same in plural and singular form.
+// This is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should.
+// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all
+// callers to use the RESTMapper they mean.
+var unpluralizedSuffixes = []string{
+ "endpoints",
+}
+
+// KindToResource converts Kind to a resource name.
+// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match
+// and they aren't guaranteed to do so.
+func KindToResource(kind unversioned.GroupVersionKind) ( /*plural*/ unversioned.GroupVersionResource /*singular*/, unversioned.GroupVersionResource) {
+ kindName := kind.Kind
+ if len(kindName) == 0 {
+ return unversioned.GroupVersionResource{}, unversioned.GroupVersionResource{}
+ }
+ singularName := strings.ToLower(kindName)
+ singular := kind.GroupVersion().WithResource(singularName)
+
+ for _, skip := range unpluralizedSuffixes {
+ if strings.HasSuffix(singularName, skip) {
+ return singular, singular
+ }
+ }
+
+ switch string(singularName[len(singularName)-1]) {
+ case "s":
+ return kind.GroupVersion().WithResource(singularName + "es"), singular
+ case "y":
+ return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular
+ }
+
+ return kind.GroupVersion().WithResource(singularName + "s"), singular
+}
+
+// ResourceSingularizer implements RESTMapper
+// It converts a resource name from plural to singular (e.g., from pods to pod)
+func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) {
+ partialResource := unversioned.GroupVersionResource{Resource: resourceType}
+ resources, err := m.ResourcesFor(partialResource)
+ if err != nil {
+ return resourceType, err
+ }
+
+ singular := unversioned.GroupVersionResource{}
+ for _, curr := range resources {
+ currSingular, ok := m.pluralToSingular[curr]
+ if !ok {
+ continue
+ }
+ if singular.IsEmpty() {
+ singular = currSingular
+ continue
+ }
+
+ if currSingular.Resource != singular.Resource {
+ return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType)
+ }
+ }
+
+ if singular.IsEmpty() {
+ return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType)
+ }
+
+ return singular.Resource, nil
+}
+
+// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior)
+func coerceResourceForMatching(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource {
+ resource.Resource = strings.ToLower(resource.Resource)
+ if resource.Version == runtime.APIVersionInternal {
+ resource.Version = ""
+ }
+
+ return resource
+}
+
+func (m *DefaultRESTMapper) ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) {
+ resource := coerceResourceForMatching(input)
+
+ hasResource := len(resource.Resource) > 0
+ hasGroup := len(resource.Group) > 0
+ hasVersion := len(resource.Version) > 0
+
+ if !hasResource {
+ return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+ }
+
+ ret := []unversioned.GroupVersionResource{}
+ switch {
+ // fully qualified. Find the exact match
+ case hasGroup && hasVersion:
+ for plural, singular := range m.pluralToSingular {
+ if singular == resource {
+ ret = append(ret, plural)
+ break
+ }
+ if plural == resource {
+ ret = append(ret, plural)
+ break
+ }
+ }
+
+ case hasGroup:
+ requestedGroupResource := resource.GroupResource()
+ for plural, singular := range m.pluralToSingular {
+ if singular.GroupResource() == requestedGroupResource {
+ ret = append(ret, plural)
+ }
+ if plural.GroupResource() == requestedGroupResource {
+ ret = append(ret, plural)
+ }
+ }
+
+ case hasVersion:
+ for plural, singular := range m.pluralToSingular {
+ if singular.Version == resource.Version && singular.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ if plural.Version == resource.Version && plural.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ }
+
+ default:
+ for plural, singular := range m.pluralToSingular {
+ if singular.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ if plural.Resource == resource.Resource {
+ ret = append(ret, plural)
+ }
+ }
+ }
+
+ if len(ret) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: resource}
+ }
+
+ sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions})
+ return ret, nil
+}
+
+func (m *DefaultRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) {
+ resources, err := m.ResourcesFor(resource)
+ if err != nil {
+ return unversioned.GroupVersionResource{}, err
+ }
+ if len(resources) == 1 {
+ return resources[0], nil
+ }
+
+ return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m *DefaultRESTMapper) KindsFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) {
+ resource := coerceResourceForMatching(input)
+
+ hasResource := len(resource.Resource) > 0
+ hasGroup := len(resource.Group) > 0
+ hasVersion := len(resource.Version) > 0
+
+ if !hasResource {
+ return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+ }
+
+ ret := []unversioned.GroupVersionKind{}
+ switch {
+ // fully qualified. Find the exact match
+ case hasGroup && hasVersion:
+ kind, exists := m.resourceToKind[resource]
+ if exists {
+ ret = append(ret, kind)
+ }
+
+ case hasGroup:
+ requestedGroupResource := resource.GroupResource()
+ for currResource, currKind := range m.resourceToKind {
+ if currResource.GroupResource() == requestedGroupResource {
+ ret = append(ret, currKind)
+ }
+ }
+
+ case hasVersion:
+ for currResource, currKind := range m.resourceToKind {
+ if currResource.Version == resource.Version && currResource.Resource == resource.Resource {
+ ret = append(ret, currKind)
+ }
+ }
+
+ default:
+ for currResource, currKind := range m.resourceToKind {
+ if currResource.Resource == resource.Resource {
+ ret = append(ret, currKind)
+ }
+ }
+ }
+
+ if len(ret) == 0 {
+ return nil, &NoResourceMatchError{PartialResource: input}
+ }
+
+ sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions})
+ return ret, nil
+}
+
+func (m *DefaultRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) {
+ kinds, err := m.KindsFor(resource)
+ if err != nil {
+ return unversioned.GroupVersionKind{}, err
+ }
+ if len(kinds) == 1 {
+ return kinds[0], nil
+ }
+
+ return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+type kindByPreferredGroupVersion struct {
+ list []unversioned.GroupVersionKind
+ sortOrder []unversioned.GroupVersion
+}
+
+func (o kindByPreferredGroupVersion) Len() int { return len(o.list) }
+func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o kindByPreferredGroupVersion) Less(i, j int) bool {
+ lhs := o.list[i]
+ rhs := o.list[j]
+ if lhs == rhs {
+ return false
+ }
+
+ if lhs.GroupVersion() == rhs.GroupVersion() {
+ return lhs.Kind < rhs.Kind
+ }
+
+ // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+ lhsIndex := -1
+ rhsIndex := -1
+
+ for i := range o.sortOrder {
+ if o.sortOrder[i] == lhs.GroupVersion() {
+ lhsIndex = i
+ }
+ if o.sortOrder[i] == rhs.GroupVersion() {
+ rhsIndex = i
+ }
+ }
+
+ if rhsIndex == -1 {
+ return true
+ }
+
+ return lhsIndex < rhsIndex
+}
+
+type resourceByPreferredGroupVersion struct {
+ list []unversioned.GroupVersionResource
+ sortOrder []unversioned.GroupVersion
+}
+
+func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) }
+func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o resourceByPreferredGroupVersion) Less(i, j int) bool {
+ lhs := o.list[i]
+ rhs := o.list[j]
+ if lhs == rhs {
+ return false
+ }
+
+ if lhs.GroupVersion() == rhs.GroupVersion() {
+ return lhs.Resource < rhs.Resource
+ }
+
+ // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+ lhsIndex := -1
+ rhsIndex := -1
+
+ for i := range o.sortOrder {
+ if o.sortOrder[i] == lhs.GroupVersion() {
+ lhsIndex = i
+ }
+ if o.sortOrder[i] == rhs.GroupVersion() {
+ rhsIndex = i
+ }
+ }
+
+ if rhsIndex == -1 {
+ return true
+ }
+
+ return lhsIndex < rhsIndex
+}
+
+// RESTMapping returns a struct representing the resource path and conversion interfaces a
+// RESTClient should use to operate on the provided group/kind in order of versions. If a version search
+// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which
+// version should be used to access the named group/kind.
+func (m *DefaultRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) {
+ // Pick an appropriate version
+ var gvk *unversioned.GroupVersionKind
+ hadVersion := false
+ for _, version := range versions {
+ if len(version) == 0 || version == runtime.APIVersionInternal {
+ continue
+ }
+
+ currGVK := gk.WithVersion(version)
+ hadVersion = true
+ if _, ok := m.kindToPluralResource[currGVK]; ok {
+ gvk = &currGVK
+ break
+ }
+ }
+ // Use the default preferred versions
+ if !hadVersion && (gvk == nil) {
+ for _, gv := range m.defaultGroupVersions {
+ if gv.Group != gk.Group {
+ continue
+ }
+
+ currGVK := gk.WithVersion(gv.Version)
+ if _, ok := m.kindToPluralResource[currGVK]; ok {
+ gvk = &currGVK
+ break
+ }
+ }
+ }
+ if gvk == nil {
+ return nil, fmt.Errorf("no kind named %q is registered in versions %q", gk, versions)
+ }
+
+ // Ensure we have a REST mapping
+ resource, ok := m.kindToPluralResource[*gvk]
+ if !ok {
+ found := []unversioned.GroupVersion{}
+ for _, gv := range m.defaultGroupVersions {
+ if _, ok := m.kindToPluralResource[*gvk]; ok {
+ found = append(found, gv)
+ }
+ }
+ if len(found) > 0 {
+ return nil, fmt.Errorf("object with kind %q exists in versions %v, not %v", gvk.Kind, found, gvk.GroupVersion().String())
+ }
+ return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported object", gvk.GroupVersion().String(), gvk.Kind)
+ }
+
+ // Ensure we have a REST scope
+ scope, ok := m.kindToScope[*gvk]
+ if !ok {
+ return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion().String(), gvk.Kind)
+ }
+
+ interfaces, err := m.interfacesFunc(gvk.GroupVersion())
+ if err != nil {
+ return nil, fmt.Errorf("the provided version %q has no relevant versions", gvk.GroupVersion().String())
+ }
+
+ retVal := &RESTMapping{
+ Resource: resource.Resource,
+ GroupVersionKind: *gvk,
+ Scope: scope,
+
+ ObjectConvertor: interfaces.ObjectConvertor,
+ MetadataAccessor: interfaces.MetadataAccessor,
+ }
+
+ return retVal, nil
+}
+
+// AddResourceAlias maps aliases to resources
+func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) {
+ if len(resources) == 0 {
+ return
+ }
+ m.aliasToResource[alias] = resources
+}
+
+// AliasesForResource returns the aliases registered for a resource, and whether any were found
+func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) {
+ if res, ok := m.aliasToResource[alias]; ok {
+ return res, true
+ }
+ return nil, false
+}
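+
+// Example (a minimal sketch; interfacesFn is a placeholder VersionInterfacesFunc):
+//
+//	gv := unversioned.GroupVersion{Version: "v1"}
+//	mapper := NewDefaultRESTMapper([]unversioned.GroupVersion{gv}, interfacesFn)
+//	mapper.Add(gv.WithKind("Pod"), RESTScopeNamespace)
+//	mapping, err := mapper.RESTMapping(unversioned.GroupKind{Kind: "Pod"}, "v1")
+//	// on success, mapping.Resource == "pods" and mapping.Scope == RESTScopeNamespace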
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/node_example.json b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/node_example.json
new file mode 100644
index 0000000..2601834
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/node_example.json
@@ -0,0 +1,49 @@
+{
+ "kind": "Node",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "e2e-test-wojtekt-minion-etd6",
+ "selfLink": "/api/v1/nodes/e2e-test-wojtekt-minion-etd6",
+ "uid": "a7e89222-e8e5-11e4-8fde-42010af09327",
+ "resourceVersion": "379",
+ "creationTimestamp": "2015-04-22T11:49:39Z"
+ },
+ "spec": {
+ "externalID": "15488322946290398375"
+ },
+ "status": {
+ "capacity": {
+ "cpu": "1",
+ "memory": "1745152Ki"
+ },
+ "conditions": [
+ {
+ "type": "Ready",
+ "status": "True",
+ "lastHeartbeatTime": "2015-04-22T11:58:17Z",
+ "lastTransitionTime": "2015-04-22T11:49:52Z",
+ "reason": "kubelet is posting ready status"
+ }
+ ],
+ "addresses": [
+ {
+ "type": "ExternalIP",
+ "address": "104.197.49.213"
+ },
+ {
+ "type": "LegacyHostIP",
+ "address": "104.197.20.11"
+ }
+ ],
+ "nodeInfo": {
+ "machineID": "",
+ "systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577",
+ "bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95",
+ "kernelVersion": "3.16.0-0.bpo.4-amd64",
+ "osImage": "Debian GNU/Linux 7 (wheezy)",
+ "containerRuntimeVersion": "docker://1.5.0",
+ "kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty",
+ "kubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty"
+ }
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod/util.go
new file mode 100644
index 0000000..cd6f9fb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod/util.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+const (
+ // TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Hostname field.
+ // The annotation value is a string specifying the hostname to be used for the pod e.g. 'my-webserver-1'
+ PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname"
+
+ // TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Subdomain field.
+ // The annotation value is a string specifying the subdomain e.g. "my-web-service"
+ // If specified, on the pod itself, "<hostname>.my-web-service.<namespace>.svc.<cluster domain>" would resolve to
+ // the pod's IP.
+ // If there is a headless service named "my-web-service" in the same namespace as the pod, then
+ // "<hostname>.my-web-service.<namespace>.svc.<cluster domain>" would be resolved by the cluster DNS Server.
+ PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain"
+)
+
+// FindPort locates the container port for the given pod and portName. If the
+// targetPort is a number, use that. If the targetPort is a string, look that
+// string up in all named ports in all containers in the target pod. If no
+// match is found, fail.
+func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) {
+ portName := svcPort.TargetPort
+ switch portName.Type {
+ case intstr.String:
+ name := portName.StrVal
+ for _, container := range pod.Spec.Containers {
+ for _, port := range container.Ports {
+ if port.Name == name && port.Protocol == svcPort.Protocol {
+ return int(port.ContainerPort), nil
+ }
+ }
+ }
+ case intstr.Int:
+ return portName.IntValue(), nil
+ }
+
+ return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
+}
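+
+// Example (a minimal sketch; pod is assumed to be an *api.Pod populated elsewhere):
+// a named target port is looked up among the pod's container ports by name and
+// protocol, while a numeric target port is returned as-is.
+//
+//	port, err := FindPort(pod, &api.ServicePort{Protocol: "TCP", TargetPort: intstr.FromString("metrics")})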
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod_example.json b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod_example.json
new file mode 100644
index 0000000..8284240
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/pod_example.json
@@ -0,0 +1,102 @@
+{
+ "kind": "Pod",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "etcd-server-e2e-test-wojtekt-master",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/pods/etcd-server-e2e-test-wojtekt-master",
+ "uid": "a671734a-e8e5-11e4-8fde-42010af09327",
+ "resourceVersion": "22",
+ "creationTimestamp": "2015-04-22T11:49:36Z",
+ "annotations": {
+ "kubernetes.io/config.mirror": "mirror",
+ "kubernetes.io/config.source": "file"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "varetcd",
+ "hostPath": {
+ "path": "/mnt/master-pd/var/etcd"
+ },
+ "emptyDir": null,
+ "gcePersistentDisk": null,
+ "awsElasticBlockStore": null,
+ "gitRepo": null,
+ "secret": null,
+ "nfs": null,
+ "iscsi": null,
+ "glusterfs": null
+ }
+ ],
+ "containers": [
+ {
+ "name": "etcd-container",
+ "image": "gcr.io/google_containers/etcd:2.0.9",
+ "command": [
+ "/usr/local/bin/etcd",
+ "--addr",
+ "127.0.0.1:4001",
+ "--bind-addr",
+ "127.0.0.1:4001",
+ "--data-dir",
+ "/var/etcd/data"
+ ],
+ "ports": [
+ {
+ "name": "serverport",
+ "hostPort": 2380,
+ "containerPort": 2380,
+ "protocol": "TCP"
+ },
+ {
+ "name": "clientport",
+ "hostPort": 4001,
+ "containerPort": 4001,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "varetcd",
+ "mountPath": "/var/etcd"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {}
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst",
+ "nodeName": "e2e-test-wojtekt-master",
+ "hostNetwork": true
+ },
+ "status": {
+ "phase": "Running",
+ "conditions": [
+ {
+ "type": "Ready",
+ "status": "True"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "name": "etcd-container",
+ "state": {
+ "running": {
+ "startedAt": "2015-04-22T11:49:32Z"
+ }
+ },
+ "lastState": {},
+ "ready": true,
+ "restartCount": 0,
+ "image": "gcr.io/google_containers/etcd:2.0.9",
+ "imageID": "docker://b6b9a86dc06aa1361357ca1b105feba961f6a4145adca6c54e142c0be0fe87b0",
+ "containerID": "docker://3cbbf818f1addfc252957b4504f56ef2907a313fe6afc47fc75373674255d46d"
+ }
+ ]
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/ref.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/ref.go
new file mode 100644
index 0000000..b864593
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/ref.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+var (
+ // Errors that could be returned by GetReference.
+ ErrNilObject = errors.New("can't reference a nil object")
+ ErrNoSelfLink = errors.New("selfLink was empty, can't make reference")
+)
+
+// GetReference returns an ObjectReference which refers to the given
+// object, or an error if the object doesn't follow the conventions
+// that would allow this.
+// TODO: should take a meta.Interface see http://issue.k8s.io/7127
+func GetReference(obj runtime.Object) (*ObjectReference, error) {
+ if obj == nil {
+ return nil, ErrNilObject
+ }
+ if ref, ok := obj.(*ObjectReference); ok {
+ // Don't make a reference to a reference.
+ return ref, nil
+ }
+
+ gvk := obj.GetObjectKind().GroupVersionKind()
+
+ // if the object referenced is actually persisted, we can just get kind from meta
+ // if we are building an object reference to something not yet persisted, we should fall back to the scheme
+ kind := gvk.Kind
+ if len(kind) == 0 {
+ // TODO: this is wrong
+ gvks, _, err := Scheme.ObjectKinds(obj)
+ if err != nil {
+ return nil, err
+ }
+ kind = gvks[0].Kind
+ }
+
+ // An object that implements only List has enough metadata to build a reference
+ var listMeta meta.List
+ objectMeta, err := meta.Accessor(obj)
+ if err != nil {
+ listMeta, err = meta.ListAccessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ listMeta = objectMeta
+ }
+
+ // if the object referenced is actually persisted, we can also get version from meta
+ version := gvk.GroupVersion().String()
+ if len(version) == 0 {
+ selfLink := listMeta.GetSelfLink()
+ if len(selfLink) == 0 {
+ return nil, ErrNoSelfLink
+ }
+ selfLinkUrl, err := url.Parse(selfLink)
+ if err != nil {
+ return nil, err
+ }
+ // example paths: /<prefix>/<version>/*
+ parts := strings.Split(selfLinkUrl.Path, "/")
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version)
+ }
+ version = parts[2]
+ }
+
+ // only has list metadata
+ if objectMeta == nil {
+ return &ObjectReference{
+ Kind: kind,
+ APIVersion: version,
+ ResourceVersion: listMeta.GetResourceVersion(),
+ }, nil
+ }
+
+ return &ObjectReference{
+ Kind: kind,
+ APIVersion: version,
+ Name: objectMeta.GetName(),
+ Namespace: objectMeta.GetNamespace(),
+ UID: objectMeta.GetUID(),
+ ResourceVersion: objectMeta.GetResourceVersion(),
+ }, nil
+}
+
+// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
+func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference, error) {
+ ref, err := GetReference(obj)
+ if err != nil {
+ return nil, err
+ }
+ ref.FieldPath = fieldPath
+ return ref, nil
+}
+
+// SetGroupVersionKind sets the reference's APIVersion and Kind from the provided GroupVersionKind.
+// This lets clients preemptively build a reference to an API object and pass it to places that
+// intend only to get a reference to that object, which simplifies the event recording interface.
+func (obj *ObjectReference) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+ obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *ObjectReference) GroupVersionKind() unversioned.GroupVersionKind {
+ return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
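+
+// Example (a minimal sketch; pod is assumed to be a *Pod whose type and object
+// metadata have already been populated):
+//
+//	ref, err := GetReference(pod)
+//	// on success, ref.Kind, ref.APIVersion, ref.Name, ref.Namespace and ref.UID
+//	// mirror the pod's type and object metadata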
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/register.go
new file mode 100644
index 0000000..6b1ed75
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/register.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer"
+)
+
+// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Unversioned is the group version for unversioned API objects
+// TODO: this should be v1 probably
+var Unversioned = unversioned.GroupVersion{Group: "", Version: "v1"}
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ if err := Scheme.AddIgnoredConversionType(&unversioned.TypeMeta{}, &unversioned.TypeMeta{}); err != nil {
+ panic(err)
+ }
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Pod{},
+ &PodList{},
+ &PodStatusResult{},
+ &PodTemplate{},
+ &PodTemplateList{},
+ &ReplicationControllerList{},
+ &ReplicationController{},
+ &ServiceList{},
+ &Service{},
+ &ServiceProxyOptions{},
+ &NodeList{},
+ &Node{},
+ &NodeProxyOptions{},
+ &Endpoints{},
+ &EndpointsList{},
+ &Binding{},
+ &Event{},
+ &EventList{},
+ &List{},
+ &LimitRange{},
+ &LimitRangeList{},
+ &ResourceQuota{},
+ &ResourceQuotaList{},
+ &Namespace{},
+ &NamespaceList{},
+ &ServiceAccount{},
+ &ServiceAccountList{},
+ &Secret{},
+ &SecretList{},
+ &PersistentVolume{},
+ &PersistentVolumeList{},
+ &PersistentVolumeClaim{},
+ &PersistentVolumeClaimList{},
+ &DeleteOptions{},
+ &ListOptions{},
+ &PodAttachOptions{},
+ &PodLogOptions{},
+ &PodExecOptions{},
+ &PodProxyOptions{},
+ &ComponentStatus{},
+ &ComponentStatusList{},
+ &SerializedReference{},
+ &RangeAllocation{},
+ &ConfigMap{},
+ &ConfigMapList{},
+ )
+
+ // Register Unversioned types under their own special group
+ Scheme.AddUnversionedTypes(Unversioned,
+ &unversioned.ExportOptions{},
+ &unversioned.Status{},
+ &unversioned.APIVersions{},
+ &unversioned.APIGroupList{},
+ &unversioned.APIGroup{},
+ &unversioned.APIResourceList{},
+ )
+}
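+
+// For example (sketch), Kind("Pod") returns the GroupKind {Group: "", Kind: "Pod"}
+// and Resource("pods") returns the GroupResource {Group: "", Resource: "pods"},
+// both qualified with this package's empty GroupName.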
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json
new file mode 100644
index 0000000..5c3c4fe
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json
@@ -0,0 +1,82 @@
+{
+ "kind": "ReplicationController",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "elasticsearch-logging-controller",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller",
+ "uid": "aa76f162-e8e5-11e4-8fde-42010af09327",
+ "resourceVersion": "98",
+ "creationTimestamp": "2015-04-22T11:49:43Z",
+ "labels": {
+ "kubernetes.io/cluster-service": "true",
+ "name": "elasticsearch-logging"
+ }
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "name": "elasticsearch-logging"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "kubernetes.io/cluster-service": "true",
+ "name": "elasticsearch-logging"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "es-persistent-storage",
+ "hostPath": null,
+ "emptyDir": {
+ "medium": ""
+ },
+ "gcePersistentDisk": null,
+ "awsElasticBlockStore": null,
+ "gitRepo": null,
+ "secret": null,
+ "nfs": null,
+ "iscsi": null,
+ "glusterfs": null
+ }
+ ],
+ "containers": [
+ {
+ "name": "elasticsearch-logging",
+ "image": "gcr.io/google_containers/elasticsearch:1.0",
+ "ports": [
+ {
+ "name": "db",
+ "containerPort": 9200,
+ "protocol": "TCP"
+ },
+ {
+ "name": "transport",
+ "containerPort": 9300,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "es-persistent-storage",
+ "mountPath": "/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {}
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {
+ "replicas": 1
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go
new file mode 100644
index 0000000..14983b2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "errors"
+ "net/http"
+ "sync"
+)
+
+// RequestContextMapper keeps track of the context associated with a particular request
+type RequestContextMapper interface {
+ // Get returns the context associated with the given request (if any), and whether the request has an associated context.
+ Get(req *http.Request) (Context, bool)
+ // Update maps the request to the given context. If no context was previously associated with the request, an error is returned.
+ // Update should only be called with a descendant context of the previously associated context.
+ // Updating to an unrelated context may return an error in the future.
+ // The context associated with a request should only be updated by a limited set of callers.
+ // Valid examples include the authentication layer, or an audit/tracing layer.
+ Update(req *http.Request, context Context) error
+}
+
+type requestContextMap struct {
+ contexts map[*http.Request]Context
+ lock sync.Mutex
+}
+
+// NewRequestContextMapper returns a new RequestContextMapper.
+// The returned mapper must be added as a request filter using NewRequestContextFilter.
+func NewRequestContextMapper() RequestContextMapper {
+ return &requestContextMap{
+ contexts: make(map[*http.Request]Context),
+ }
+}
+
+// Get returns the context associated with the given request (if any), and whether the request has an associated context.
+// Get will only return a valid context when called from inside the filter chain set up by NewRequestContextFilter()
+func (c *requestContextMap) Get(req *http.Request) (Context, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ context, ok := c.contexts[req]
+ return context, ok
+}
+
+// Update maps the request to the given context.
+// If no context was previously associated with the request, an error is returned and the context is ignored.
+func (c *requestContextMap) Update(req *http.Request, context Context) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if _, ok := c.contexts[req]; !ok {
+ return errors.New("No context associated")
+ }
+ // TODO: ensure the new context is a descendant of the existing one
+ c.contexts[req] = context
+ return nil
+}
+
+// init maps the request to the given context and returns true if there was no context associated with the request already.
+// if a context was already associated with the request, it ignores the given context and returns false.
+// init is intentionally unexported to ensure that all init calls are paired with a remove after a request is handled
+func (c *requestContextMap) init(req *http.Request, context Context) bool {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if _, exists := c.contexts[req]; exists {
+ return false
+ }
+ c.contexts[req] = context
+ return true
+}
+
+// remove is intentionally unexported to ensure that the context is not removed until a request is handled
+func (c *requestContextMap) remove(req *http.Request) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ delete(c.contexts, req)
+}
+
+// NewRequestContextFilter ensures there is a Context object associated with the request before calling the passed handler.
+// After the passed handler runs, the context is cleaned up.
+func NewRequestContextFilter(mapper RequestContextMapper, handler http.Handler) (http.Handler, error) {
+ if mapper, ok := mapper.(*requestContextMap); ok {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if mapper.init(req, NewContext()) {
+ // If we were the ones to successfully initialize, pair with a remove
+ defer mapper.remove(req)
+ }
+ handler.ServeHTTP(w, req)
+ }), nil
+ } else {
+ return handler, errors.New("Unknown RequestContextMapper implementation.")
+ }
+
+}
+
+// IsEmpty returns true if there are no contexts registered, or an error if it could not be determined. Intended for use by tests.
+func IsEmpty(requestsToContexts RequestContextMapper) (bool, error) {
+ if requestsToContexts, ok := requestsToContexts.(*requestContextMap); ok {
+ return len(requestsToContexts.contexts) == 0, nil
+ }
+ return true, errors.New("Unknown RequestContextMapper implementation")
+}
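+
+// Example (a minimal sketch; apiHandler is a placeholder http.Handler):
+//
+//	mapper := NewRequestContextMapper()
+//	handler, err := NewRequestContextFilter(mapper, apiHandler)
+//	// handlers running under the returned filter can then look up the
+//	// per-request context with: ctx, ok := mapper.Get(req)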
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go
new file mode 100644
index 0000000..2d3012c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "math/big"
+ "strconv"
+
+ inf "gopkg.in/inf.v0"
+)
+
+// Scale is used for getting and setting the base-10 scaled value.
+// Base-2 scales are omitted for mathematical simplicity.
+// See Quantity.ScaledValue for more details.
+type Scale int32
+
+// infScale adapts a Scale value to an inf.Scale value.
+func (s Scale) infScale() inf.Scale {
+ return inf.Scale(-s) // inf.Scale is upside-down
+}
+
+const (
+ Nano Scale = -9
+ Micro Scale = -6
+ Milli Scale = -3
+ Kilo Scale = 3
+ Mega Scale = 6
+ Giga Scale = 9
+ Tera Scale = 12
+ Peta Scale = 15
+ Exa Scale = 18
+)
+
+var (
+ Zero = int64Amount{}
+
+ // Used by quantity strings - treat as read only
+ zeroBytes = []byte("0")
+)
+
+// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
+// than operations on inf.Dec for values that can be represented as int64.
+type int64Amount struct {
+ value int64
+ scale Scale
+}
+
+// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0.
+func (a int64Amount) Sign() int {
+ switch {
+ case a.value == 0:
+ return 0
+ case a.value > 0:
+ return 1
+ default:
+ return -1
+ }
+}
+
+// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be
+// represented in an int64 OR would result in a loss of precision. This method is intended as
+// an optimization to avoid calling AsDec.
+func (a int64Amount) AsInt64() (int64, bool) {
+ if a.scale == 0 {
+ return a.value, true
+ }
+ if a.scale < 0 {
+ // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior
+ // to the int64Amount being created.
+ return 0, false
+ }
+ return positiveScaleInt64(a.value, a.scale)
+}
+
+// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale,
+// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result
+// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger
+// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would
+// return 1, because 0.000001 is rounded up to 1.
+func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) {
+ if a.scale < scale {
+ result, _ = negativeScaleInt64(a.value, scale-a.scale)
+ return result, true
+ }
+ return positiveScaleInt64(a.value, a.scale-scale)
+}
+
+// AsDec returns an inf.Dec representation of this value.
+func (a int64Amount) AsDec() *inf.Dec {
+ var base inf.Dec
+ base.SetUnscaled(a.value)
+ base.SetScale(inf.Scale(-a.scale))
+ return &base
+}
+
+// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b.
+func (a int64Amount) Cmp(b int64Amount) int {
+ switch {
+ case a.scale == b.scale:
+ // compare only the unscaled portion
+ case a.scale > b.scale:
+ result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale)
+ if !exact {
+ return a.AsDec().Cmp(b.AsDec())
+ }
+ if result == a.value {
+ switch {
+ case remainder == 0:
+ return 0
+ case remainder > 0:
+ return -1
+ default:
+ return 1
+ }
+ }
+ b.value = result
+ default:
+ result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale)
+ if !exact {
+ return a.AsDec().Cmp(b.AsDec())
+ }
+ if result == b.value {
+ switch {
+ case remainder == 0:
+ return 0
+ case remainder > 0:
+ return 1
+ default:
+ return -1
+ }
+ }
+ a.value = result
+ }
+
+ switch {
+ case a.value == b.value:
+ return 0
+ case a.value < b.value:
+ return -1
+ default:
+ return 1
+ }
+}
+
+// Add adds two int64Amounts together, matching scales. It will return false and not mutate
+// a if overflow or underflow would result.
+func (a *int64Amount) Add(b int64Amount) bool {
+ switch {
+ case b.value == 0:
+ return true
+ case a.value == 0:
+ a.value = b.value
+ a.scale = b.scale
+ return true
+ case a.scale == b.scale:
+ c, ok := int64Add(a.value, b.value)
+ if !ok {
+ return false
+ }
+ a.value = c
+ case a.scale > b.scale:
+ c, ok := positiveScaleInt64(a.value, a.scale-b.scale)
+ if !ok {
+ return false
+ }
+ c, ok = int64Add(c, b.value)
+ if !ok {
+ return false
+ }
+ a.scale = b.scale
+ a.value = c
+ default:
+ c, ok := positiveScaleInt64(b.value, b.scale-a.scale)
+ if !ok {
+ return false
+ }
+ c, ok = int64Add(a.value, c)
+ if !ok {
+ return false
+ }
+ a.value = c
+ }
+ return true
+}
+
+// Sub removes the value of b from the current amount, or returns false if underflow would result.
+func (a *int64Amount) Sub(b int64Amount) bool {
+ return a.Add(int64Amount{value: -b.value, scale: b.scale})
+}
+
+// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
+// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
+func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) {
+ if a.scale >= scale {
+ return a, true
+ }
+ result, exact := negativeScaleInt64(a.value, scale-a.scale)
+ return int64Amount{value: result, scale: scale}, exact
+}
+
+// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
+// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
+func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+ mantissa := a.value
+ exponent = int32(a.scale)
+
+ amount, times := removeInt64Factors(mantissa, 10)
+ exponent += int32(times)
+
+ // make sure exponent is a multiple of 3
+ var ok bool
+ switch exponent % 3 {
+ case 1, -2:
+ amount, ok = int64MultiplyScale10(amount)
+ if !ok {
+ return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
+ }
+ exponent = exponent - 1
+ case 2, -1:
+ amount, ok = int64MultiplyScale100(amount)
+ if !ok {
+ return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
+ }
+ exponent = exponent - 2
+ }
+ return strconv.AppendInt(out, amount, 10), exponent
+}
+
+// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
+// return []byte("2048"), 1.
+func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
+ value, ok := a.AsScaledInt64(0)
+ if !ok {
+ return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out)
+ }
+ amount, exponent := removeInt64Factors(value, 1024)
+ return strconv.AppendInt(out, amount, 10), exponent
+}
+
+// infDecAmount implements common operations over an inf.Dec that are specific to the quantity
+// representation.
+type infDecAmount struct {
+ *inf.Dec
+}
+
+// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
+// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
+func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) {
+ tmp := &inf.Dec{}
+ tmp.Round(a.Dec, scale.infScale(), inf.RoundUp)
+ return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0
+}
+
+// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
+// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
+func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+ mantissa := a.Dec.UnscaledBig()
+ exponent = int32(-a.Dec.Scale())
+ amount := big.NewInt(0).Set(mantissa)
+ // move all factors of 10 into the exponent for easy reasoning
+ amount, times := removeBigIntFactors(amount, bigTen)
+ exponent += times
+
+ // make sure exponent is a multiple of 3
+ for exponent%3 != 0 {
+ amount.Mul(amount, bigTen)
+ exponent--
+ }
+
+ return append(out, amount.String()...), exponent
+}
+
+// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
+// return []byte("2048"), 1.
+func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
+ tmp := &inf.Dec{}
+ tmp.Round(a.Dec, 0, inf.RoundUp)
+ amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024)
+ return append(out, amount.String()...), exponent
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go
new file mode 100644
index 0000000..f091cde
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/api/resource/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package resource is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/api/resource/generated.proto
+
+ It has these top-level messages:
+ Quantity
+*/
+package resource
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *Quantity) Reset() { *m = Quantity{} }
+func (*Quantity) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Quantity)(nil), "k8s.io.kubernetes.pkg.api.resource.Quantity")
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto
new file mode 100644
index 0000000..bdc091d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto
@@ -0,0 +1,93 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.api.resource;
+
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "resource";
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and Int64() accessors.
+//
+// The serialization format is:
+//
+// <quantity> ::= <signedNumber><suffix>
+// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit> ::= 0 | 1 | ... | 9
+// <digits> ::= <digit> | <digit><digits>
+// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign> ::= "+" | "-"
+// <signedNumber> ::= <number> | <sign><number>
+// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
+// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI> ::= m | "" | k | M | G | T | P | E
+// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will be rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+// a. No precision is lost
+// b. No fractional digits will be emitted
+// c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+// 1.5 will be serialized as "1500m"
+// 1.5Gi will be serialized as "1536Mi"
+//
+// NOTE: We reserve the right to amend this canonical format, perhaps to
+// allow 1.5 to be canonical.
+// TODO: Remove above disclaimer after all bikeshedding about format is over,
+// or after March 2015.
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code in the hopes that that will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message Quantity {
+ optional string string = 1;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/math.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/math.go
new file mode 100644
index 0000000..887ac74
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/math.go
@@ -0,0 +1,327 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "math/big"
+
+ inf "gopkg.in/inf.v0"
+)
+
+const (
+ // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64.
+ // It is also the maximum decimal digits that can be represented with an int64.
+ maxInt64Factors = 18
+)
+
+var (
+ // Commonly needed big.Int values-- treat as read only!
+ bigTen = big.NewInt(10)
+ bigZero = big.NewInt(0)
+ bigOne = big.NewInt(1)
+ bigThousand = big.NewInt(1000)
+ big1024 = big.NewInt(1024)
+
+ // Commonly needed inf.Dec values-- treat as read only!
+ decZero = inf.NewDec(0, 0)
+ decOne = inf.NewDec(1, 0)
+ decMinusOne = inf.NewDec(-1, 0)
+ decThousand = inf.NewDec(1000, 0)
+ dec1024 = inf.NewDec(1024, 0)
+ decMinus1024 = inf.NewDec(-1024, 0)
+
+ // Largest (in magnitude) number allowed.
+ maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64
+
+ // The maximum value we can represent milli-units for.
+ // Compare with the return value of Quantity.Value() to
+ // see if it's safe to use Quantity.MilliValue().
+ MaxMilliValue = int64(((1 << 63) - 1) / 1000)
+)
+
+const mostNegative = -(mostPositive + 1)
+const mostPositive = 1<<63 - 1
+
+// int64Add returns a+b, or false if that would overflow int64.
+func int64Add(a, b int64) (int64, bool) {
+ c := a + b
+ switch {
+ case a > 0 && b > 0:
+ if c < 0 {
+ return 0, false
+ }
+ case a < 0 && b < 0:
+ if c > 0 {
+ return 0, false
+ }
+ if a == mostNegative && b == mostNegative {
+ return 0, false
+ }
+ }
+ return c, true
+}
+
+// int64Multiply returns a*b, or false if that would overflow or underflow int64.
+func int64Multiply(a, b int64) (int64, bool) {
+ if a == 0 || b == 0 || a == 1 || b == 1 {
+ return a * b, true
+ }
+ if a == mostNegative || b == mostNegative {
+ return 0, false
+ }
+ c := a * b
+ return c, c/b == a
+}
+
+// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64.
+// Use when b is known to be greater than one.
+func int64MultiplyScale(a int64, b int64) (int64, bool) {
+ if a == 0 || a == 1 {
+ return a * b, true
+ }
+ if a == mostNegative && b != 1 {
+ return 0, false
+ }
+ c := a * b
+ return c, c/b == a
+}
+
+// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale10(a int64) (int64, bool) {
+ if a == 0 || a == 1 {
+ return a * 10, true
+ }
+ if a == mostNegative {
+ return 0, false
+ }
+ c := a * 10
+ return c, c/10 == a
+}
+
+// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale100(a int64) (int64, bool) {
+ if a == 0 || a == 1 {
+ return a * 100, true
+ }
+ if a == mostNegative {
+ return 0, false
+ }
+ c := a * 100
+ return c, c/100 == a
+}
+
+// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale1000(a int64) (int64, bool) {
+ if a == 0 || a == 1 {
+ return a * 1000, true
+ }
+ if a == mostNegative {
+ return 0, false
+ }
+ c := a * 1000
+ return c, c/1000 == a
+}
+
+// positiveScaleInt64 multiplies base by 10^scale, returning false if the
+// value overflows. Passing a negative scale is undefined.
+func positiveScaleInt64(base int64, scale Scale) (int64, bool) {
+ switch scale {
+ case 0:
+ return base, true
+ case 1:
+ return int64MultiplyScale10(base)
+ case 2:
+ return int64MultiplyScale100(base)
+ case 3:
+ return int64MultiplyScale1000(base)
+ case 6:
+ return int64MultiplyScale(base, 1000000)
+ case 9:
+ return int64MultiplyScale(base, 1000000000)
+ default:
+ value := base
+ var ok bool
+ for i := Scale(0); i < scale; i++ {
+ if value, ok = int64MultiplyScale(value, 10); !ok {
+ return 0, false
+ }
+ }
+ return value, true
+ }
+}
+
+// negativeScaleInt64 reduces base by the provided scale, rounding up, until the
+// value is zero or the scale is reached. Passing a negative scale is undefined.
+// The value returned, if not exact, is rounded away from zero.
+func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
+ if scale == 0 {
+ return base, true
+ }
+
+ value := base
+ var fraction bool
+ for i := Scale(0); i < scale; i++ {
+ if !fraction && value%10 != 0 {
+ fraction = true
+ }
+ value = value / 10
+ if value == 0 {
+ if fraction {
+ if base > 0 {
+ return 1, false
+ }
+ return -1, false
+ }
+ return 0, true
+ }
+ }
+ if fraction {
+ if base > 0 {
+ value += 1
+ } else {
+ value += -1
+ }
+ }
+ return value, !fraction
+}
+
+func pow10Int64(b int64) int64 {
+ switch b {
+ case 0:
+ return 1
+ case 1:
+ return 10
+ case 2:
+ return 100
+ case 3:
+ return 1000
+ case 4:
+ return 10000
+ case 5:
+ return 100000
+ case 6:
+ return 1000000
+ case 7:
+ return 10000000
+ case 8:
+ return 100000000
+ case 9:
+ return 1000000000
+ case 10:
+ return 10000000000
+ case 11:
+ return 100000000000
+ case 12:
+ return 1000000000000
+ case 13:
+ return 10000000000000
+ case 14:
+ return 100000000000000
+ case 15:
+ return 1000000000000000
+ case 16:
+ return 10000000000000000
+ case 17:
+ return 100000000000000000
+ case 18:
+ return 1000000000000000000
+ default:
+ return 0
+ }
+}
+
+// powInt64 raises a to the bth power. Is not overflow aware.
+func powInt64(a, b int64) int64 {
+ p := int64(1)
+ for b > 0 {
+ if b&1 != 0 {
+ p *= a
+ }
+ b >>= 1
+ a *= a
+ }
+ return p
+}
+
+// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder, or
+// false if no such division is possible. Dividing by negative scales is undefined.
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
+ if scale == 0 {
+ return base, 0, true
+ }
+ // the max scale representable in base 10 in an int64 is 18 decimal places
+ if scale >= 18 {
+ return 0, base, false
+ }
+ divisor := pow10Int64(int64(scale))
+ return base / divisor, base % divisor, true
+}
+
+// removeInt64Factors divides in a loop; the return values have the property that
+// value == result * base ^ times
+func removeInt64Factors(value int64, base int64) (result int64, times int32) {
+ times = 0
+ result = value
+ negative := result < 0
+ if negative {
+ result = -result
+ }
+ switch base {
+ // allow the compiler to optimize the common cases
+ case 10:
+ for result >= 10 && result%10 == 0 {
+ times++
+ result = result / 10
+ }
+ // allow the compiler to optimize the common cases
+ case 1024:
+ for result >= 1024 && result%1024 == 0 {
+ times++
+ result = result / 1024
+ }
+ default:
+ for result >= base && result%base == 0 {
+ times++
+ result = result / base
+ }
+ }
+ if negative {
+ result = -result
+ }
+ return result, times
+}
+
+// removeBigIntFactors divides in a loop; the return values have the property that
+// d == result * factor ^ times
+// d may be modified in place.
+// If d == 0, then the return values will be (0, 0)
+func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
+ q := big.NewInt(0)
+ m := big.NewInt(0)
+ for d.Cmp(bigZero) != 0 {
+ q.DivMod(d, factor, m)
+ if m.Cmp(bigZero) != 0 {
+ break
+ }
+ times++
+ d, q = q, d
+ }
+ return d, times
+}
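
The overflow checks above all follow the same pattern: multiply, then verify that the product divides back to one of the operands. These helpers are unexported, so the following is only a standalone sketch of the same technique, not a call into this package:

package main

import "fmt"

const mostNegative int64 = -1 << 63

// mulInt64 mirrors the check used by int64Multiply above: after multiplying,
// dividing the product by one operand must give back the other operand,
// otherwise the multiplication overflowed int64.
func mulInt64(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	if a == mostNegative || b == mostNegative {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}

func main() {
	fmt.Println(mulInt64(1000000000, 4)) // 4000000000 true
	fmt.Println(mulInt64(1<<62, 4))      // 0 false (overflows int64)
}
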
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go
new file mode 100644
index 0000000..823dd5e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go
@@ -0,0 +1,777 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "regexp"
+ "strconv"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+
+ inf "gopkg.in/inf.v0"
+)
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and Int64() accessors.
+//
+// The serialization format is:
+//
+// <quantity> ::= <signedNumber><suffix>
+// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit> ::= 0 | 1 | ... | 9
+// <digits> ::= <digit> | <digit><digits>
+// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign> ::= "+" | "-"
+// <signedNumber> ::= <number> | <sign><number>
+// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
+// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI> ::= m | "" | k | M | G | T | P | E
+// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will be rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+// a. No precision is lost
+// b. No fractional digits will be emitted
+// c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+// 1.5 will be serialized as "1500m"
+// 1.5Gi will be serialized as "1536Mi"
+//
+// NOTE: We reserve the right to amend this canonical format, perhaps to
+// allow 1.5 to be canonical.
+// TODO: Remove above disclaimer after all bikeshedding about format is over,
+// or after March 2015.
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code, in the hope that this will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Quantity struct {
+ // i is the quantity in int64 scaled form, if d.Dec == nil
+ i int64Amount
+ // d is the quantity in inf.Dec form if d.Dec != nil
+ d infDecAmount
+ // s is the generated value of this quantity to avoid recalculation
+ s string
+
+ // Change Format at will. See the comment for Canonicalize for
+ // more details.
+ Format
+}
+
+// CanonicalValue allows a quantity amount to be converted to a string.
+type CanonicalValue interface {
+ // AsCanonicalBytes returns a byte array representing the string representation
+ // of the value mantissa and an int32 representing its exponent in base-10. Callers may
+ // pass a byte slice to the method to avoid allocations.
+ AsCanonicalBytes(out []byte) ([]byte, int32)
+ // AsCanonicalBase1024Bytes returns a byte array representing the string representation
+ // of the value mantissa and an int32 representing its exponent in base-1024. Callers
+ // may pass a byte slice to the method to avoid allocations.
+ AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
+}
+
+// Format lists the three possible formattings of a quantity.
+type Format string
+
+const (
+ DecimalExponent = Format("DecimalExponent") // e.g., 12e6
+ BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20)
+ DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6)
+)
+
+// MustParse turns the given string into a quantity or panics; for tests
+// or others cases where you know the string is valid.
+func MustParse(str string) Quantity {
+ q, err := ParseQuantity(str)
+ if err != nil {
+ panic(fmt.Errorf("cannot parse '%v': %v", str, err))
+ }
+ return q
+}
+
+const (
+ // splitREString is used to separate a number from its suffix; as such,
+ // this is overly permissive, but that's OK-- it will be checked later.
+ splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+)
+
+var (
+ // splitRE is used to get the various parts of a number.
+ splitRE = regexp.MustCompile(splitREString)
+
+ // Errors that could happen while parsing a string.
+ ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
+ ErrNumeric = errors.New("unable to parse numeric part of quantity")
+ ErrSuffix = errors.New("unable to parse quantity's suffix")
+)
+
+// parseQuantityString is a fast scanner for quantity values.
+func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
+ positive = true
+ pos := 0
+ end := len(str)
+
+ // handle leading sign
+ if pos < end {
+ switch str[0] {
+ case '-':
+ positive = false
+ pos++
+ case '+':
+ pos++
+ }
+ }
+
+ // strip leading zeros
+Zeroes:
+ for i := pos; ; i++ {
+ if i >= end {
+ num = "0"
+ value = num
+ return
+ }
+ switch str[i] {
+ case '0':
+ pos++
+ default:
+ break Zeroes
+ }
+ }
+
+ // extract the numerator
+Num:
+ for i := pos; ; i++ {
+ if i >= end {
+ num = str[pos:end]
+ value = str[0:end]
+ return
+ }
+ switch str[i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ num = str[pos:i]
+ pos = i
+ break Num
+ }
+ }
+
+ // if we stripped all numerator positions, always return 0
+ if len(num) == 0 {
+ num = "0"
+ }
+
+ // handle a denominator
+ if pos < end && str[pos] == '.' {
+ pos++
+ Denom:
+ for i := pos; ; i++ {
+ if i >= end {
+ denom = str[pos:end]
+ value = str[0:end]
+ return
+ }
+ switch str[i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ denom = str[pos:i]
+ pos = i
+ break Denom
+ }
+ }
+ // TODO: we currently allow 1.G, but we may not want to in the future.
+ // if len(denom) == 0 {
+ // err = ErrFormatWrong
+ // return
+ // }
+ }
+ value = str[0:pos]
+
+ // grab the elements of the suffix
+ suffixStart := pos
+ for i := pos; ; i++ {
+ if i >= end {
+ suffix = str[suffixStart:end]
+ return
+ }
+ if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
+ pos = i
+ break
+ }
+ }
+ if pos < end {
+ switch str[pos] {
+ case '-', '+':
+ pos++
+ }
+ }
+Suffix:
+ for i := pos; ; i++ {
+ if i >= end {
+ suffix = str[suffixStart:end]
+ return
+ }
+ switch str[i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ break Suffix
+ }
+ }
+ // we encountered a non decimal in the Suffix loop, but the last character
+ // was not a valid exponent
+ err = ErrFormatWrong
+ return
+}
+
+// ParseQuantity turns str into a Quantity, or returns an error.
+func ParseQuantity(str string) (Quantity, error) {
+ if len(str) == 0 {
+ return Quantity{}, ErrFormatWrong
+ }
+ if str == "0" {
+ return Quantity{Format: DecimalSI, s: str}, nil
+ }
+
+ positive, value, num, denom, suf, err := parseQuantityString(str)
+ if err != nil {
+ return Quantity{}, err
+ }
+
+ base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
+ if !ok {
+ return Quantity{}, ErrSuffix
+ }
+
+ precision := int32(0)
+ scale := int32(0)
+ mantissa := int64(1)
+ switch format {
+ case DecimalExponent, DecimalSI:
+ scale = exponent
+ precision = maxInt64Factors - int32(len(num)+len(denom))
+ case BinarySI:
+ scale = 0
+ switch {
+ case exponent >= 0 && len(denom) == 0:
+ // only handle positive binary numbers with the fast path
+ mantissa = int64(int64(mantissa) << uint64(exponent))
+ // 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
+ precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
+ default:
+ precision = -1
+ }
+ }
+
+ if precision >= 0 {
+ // if we have a denominator, shift the entire value to the left by the number of places in the
+ // denominator
+ scale -= int32(len(denom))
+ if scale >= int32(Nano) {
+ shifted := num + denom
+
+ var value int64
+ value, err := strconv.ParseInt(shifted, 10, 64)
+ if err != nil {
+ return Quantity{}, ErrNumeric
+ }
+ if result, ok := int64Multiply(value, int64(mantissa)); ok {
+ if !positive {
+ result = -result
+ }
+ // if the number is in canonical form, reuse the string
+ switch format {
+ case BinarySI:
+ if exponent%10 == 0 && (value&0x07 != 0) {
+ return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
+ }
+ default:
+ if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
+ return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
+ }
+ }
+ return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
+ }
+ }
+ }
+
+ amount := new(inf.Dec)
+ if _, ok := amount.SetString(value); !ok {
+ return Quantity{}, ErrNumeric
+ }
+
+ // So that no one but us has to think about suffixes, remove it.
+ if base == 10 {
+ amount.SetScale(amount.Scale() + Scale(exponent).infScale())
+ } else if base == 2 {
+ // numericSuffix = 2 ** exponent
+ numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
+ ub := amount.UnscaledBig()
+ amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
+ }
+
+ // Cap at min/max bounds.
+ sign := amount.Sign()
+ if sign == -1 {
+ amount.Neg(amount)
+ }
+
+ // This rounds non-zero values up to the minimum representable value, under the theory that
+ // if you want some resources, you should get some resources, even if you asked for way too small
+ // of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
+ // the side effect of rounding values < .5n to zero.
+ if v, ok := amount.Unscaled(); v != int64(0) || !ok {
+ amount.Round(amount, Nano.infScale(), inf.RoundUp)
+ }
+
+ // The max is just a simple cap.
+ // TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
+ if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
+ amount.Set(maxAllowed.Dec)
+ }
+
+ if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
+ // This avoids rounding and hopefully confusion, too.
+ format = DecimalSI
+ }
+ if sign == -1 {
+ amount.Neg(amount)
+ }
+
+ return Quantity{d: infDecAmount{amount}, Format: format}, nil
+}
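
A small usage sketch for ParseQuantity and MustParse, assuming the vendored import path; the rejection case falls out of the scanner above and reports ErrFormatWrong:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	q, err := resource.ParseQuantity("250m")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.String(), q.Format) // 250m DecimalSI

	// Malformed input is rejected rather than silently truncated.
	if _, err := resource.ParseQuantity("1.5.0"); err != nil {
		fmt.Println("rejected:", err)
	}
}
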
+
+// DeepCopy returns a deep-copy of the Quantity value. Note that the method
+// receiver is a value, so we can mutate it in-place and return it.
+func (q Quantity) DeepCopy() Quantity {
+ if q.d.Dec != nil {
+ tmp := &inf.Dec{}
+ q.d.Dec = tmp.Set(q.d.Dec)
+ }
+ return q
+}
+
+// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
+//
+// Note about BinarySI:
+// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between
+// -1 and +1, it will be emitted as if q.Format were DecimalSI.
+// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
+// rounded up. (1.1i becomes 2i.)
+func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
+ if q.IsZero() {
+ return zeroBytes, nil
+ }
+
+ var rounded CanonicalValue
+ format := q.Format
+ switch format {
+ case DecimalExponent, DecimalSI:
+ case BinarySI:
+ if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
+ // This avoids rounding and hopefully confusion, too.
+ format = DecimalSI
+ } else {
+ var exact bool
+ if rounded, exact = q.AsScale(0); !exact {
+ // Don't lose precision-- show as DecimalSI
+ format = DecimalSI
+ }
+ }
+ default:
+ format = DecimalExponent
+ }
+
+ // TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
+ // one of the other formats.
+ switch format {
+ case DecimalExponent, DecimalSI:
+ number, exponent := q.AsCanonicalBytes(out)
+ suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
+ return number, suffix
+ default:
+ // format must be BinarySI
+ number, exponent := rounded.AsCanonicalBase1024Bytes(out)
+ suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
+ return number, suffix
+ }
+}
+
+// AsInt64 returns a representation of the current value as an int64 if a fast conversion
+// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
+func (q *Quantity) AsInt64() (int64, bool) {
+ if q.d.Dec != nil {
+ return 0, false
+ }
+ return q.i.AsInt64()
+}
+
+// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
+func (q *Quantity) ToDec() *Quantity {
+ if q.d.Dec == nil {
+ q.d.Dec = q.i.AsDec()
+ q.i = int64Amount{}
+ }
+ return q
+}
+
+// AsDec returns the quantity as represented by a scaled inf.Dec.
+func (q *Quantity) AsDec() *inf.Dec {
+ if q.d.Dec != nil {
+ return q.d.Dec
+ }
+ q.d.Dec = q.i.AsDec()
+ q.i = int64Amount{}
+ return q.d.Dec
+}
+
+// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
+// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
+// allocation.
+func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+ if q.d.Dec != nil {
+ return q.d.AsCanonicalBytes(out)
+ }
+ return q.i.AsCanonicalBytes(out)
+}
+
+// IsZero returns true if the quantity is equal to zero.
+func (q *Quantity) IsZero() bool {
+ if q.d.Dec != nil {
+ return q.d.Dec.Sign() == 0
+ }
+ return q.i.value == 0
+}
+
+// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
+// quantity is greater than zero.
+func (q *Quantity) Sign() int {
+ if q.d.Dec != nil {
+ return q.d.Dec.Sign()
+ }
+ return q.i.Sign()
+}
+
+// AsScale returns the current value, rounded up to the provided scale, and returns
+// false if the scale resulted in a loss of precision.
+func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
+ if q.d.Dec != nil {
+ return q.d.AsScale(scale)
+ }
+ return q.i.AsScale(scale)
+}
+
+// RoundUp updates the quantity to the provided scale, ensuring that the value is at
+// least 1. False is returned if the rounding operation resulted in a loss of precision.
+// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
+func (q *Quantity) RoundUp(scale Scale) bool {
+ if q.d.Dec != nil {
+ q.s = ""
+ d, exact := q.d.AsScale(scale)
+ q.d = d
+ return exact
+ }
+ // avoid clearing the string value if we have already calculated it
+ if q.i.scale >= scale {
+ return true
+ }
+ q.s = ""
+ i, exact := q.i.AsScale(scale)
+ q.i = i
+ return exact
+}
+
+// Add adds the provided y quantity to the current value. If the current value is zero,
+// the format of the quantity will be updated to the format of y.
+func (q *Quantity) Add(y Quantity) {
+ q.s = ""
+ if q.d.Dec == nil && y.d.Dec == nil {
+ if q.i.value == 0 {
+ q.Format = y.Format
+ }
+ if q.i.Add(y.i) {
+ return
+ }
+ } else if q.IsZero() {
+ q.Format = y.Format
+ }
+ q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
+}
+
+// Sub subtracts the provided quantity from the current value in place. If the current
+// value is zero, the format of the quantity will be updated to the format of y.
+func (q *Quantity) Sub(y Quantity) {
+ q.s = ""
+ if q.IsZero() {
+ q.Format = y.Format
+ }
+ if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
+ return
+ }
+ q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
+}
+
+// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
+// quantity is greater than y.
+func (q *Quantity) Cmp(y Quantity) int {
+ if q.d.Dec == nil && y.d.Dec == nil {
+ return q.i.Cmp(y.i)
+ }
+ return q.AsDec().Cmp(y.AsDec())
+}
+
+// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
+// quantity is greater than y.
+func (q *Quantity) CmpInt64(y int64) int {
+ if q.d.Dec != nil {
+ return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
+ }
+ return q.i.Cmp(int64Amount{value: y})
+}
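
The arithmetic and comparison helpers above compose as in this sketch, with the vendored import path assumed; the printed strings follow the canonicalization rules documented earlier in the file:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	total := resource.MustParse("0")
	total.Add(resource.MustParse("250m")) // total adopts DecimalSI from y
	total.Add(resource.MustParse("1"))
	fmt.Println(total.String()) // 1250m

	limit := resource.MustParse("2")
	fmt.Println(total.Cmp(limit))  // -1: total < limit
	fmt.Println(total.CmpInt64(1)) // 1: total > 1

	total.Sub(resource.MustParse("250m"))
	fmt.Println(total.String()) // 1
}
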
+
+// Neg sets quantity to be the negative value of itself.
+func (q *Quantity) Neg() {
+ q.s = ""
+ if q.d.Dec == nil {
+ q.i.value = -q.i.value
+ return
+ }
+ q.d.Dec.Neg(q.d.Dec)
+}
+
+// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
+// of most Quantity values.
+const int64QuantityExpectedBytes = 18
+
+// String formats the Quantity as a string, caching the result if not calculated.
+// String is an expensive operation and caching this result significantly reduces the cost of
+// normal parse / marshal operations on Quantity.
+func (q *Quantity) String() string {
+ if len(q.s) == 0 {
+ result := make([]byte, 0, int64QuantityExpectedBytes)
+ number, suffix := q.CanonicalizeBytes(result)
+ number = append(number, suffix...)
+ q.s = string(number)
+ }
+ return q.s
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (q Quantity) MarshalJSON() ([]byte, error) {
+ if len(q.s) > 0 {
+ out := make([]byte, len(q.s)+2)
+ out[0], out[len(out)-1] = '"', '"'
+ copy(out[1:], q.s)
+ return out, nil
+ }
+ result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes)
+ result[0] = '"'
+ number, suffix := q.CanonicalizeBytes(result[1:1])
+ // if the same slice was returned to us that we passed in, avoid another allocation by copying number into
+ // the source slice and returning that
+ if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
+ number = append(number, suffix...)
+ number = append(number, '"')
+ return result[:1+len(number)], nil
+ }
+ // if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
+ // append
+ result = result[:1]
+ result = append(result, number...)
+ result = append(result, suffix...)
+ result = append(result, '"')
+ return result, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+// TODO: Remove support for leading/trailing whitespace
+func (q *Quantity) UnmarshalJSON(value []byte) error {
+ l := len(value)
+ if l == 4 && bytes.Equal(value, []byte("null")) {
+ q.d.Dec = nil
+ q.i = int64Amount{}
+ return nil
+ }
+ if l >= 2 && value[0] == '"' && value[l-1] == '"' {
+ value = value[1 : l-1]
+ }
+
+ parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
+ if err != nil {
+ return err
+ }
+
+ // This copy is safe because parsed will not be referred to again.
+ *q = parsed
+ return nil
+}
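
Because Quantity implements the JSON marshaler and unmarshaler interfaces above, it can be embedded directly in API structs. A round-trip sketch; the struct name spec is illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

type spec struct {
	Memory resource.Quantity `json:"memory"`
}

func main() {
	in := spec{Memory: resource.MustParse("1.5Gi")}
	data, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"memory":"1536Mi"}

	var out spec
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Memory.String()) // 1536Mi
}
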
+
+// NewQuantity returns a new Quantity representing the given
+// value in the given format.
+func NewQuantity(value int64, format Format) *Quantity {
+ return &Quantity{
+ i: int64Amount{value: value},
+ Format: format,
+ }
+}
+
+// NewMilliQuantity returns a new Quantity representing the given
+// value * 1/1000 in the given format. Note that BinarySI formatting
+// will round fractional values, and will be changed to DecimalSI for
+// values x where (-1 < x < 1) && (x != 0).
+func NewMilliQuantity(value int64, format Format) *Quantity {
+ return &Quantity{
+ i: int64Amount{value: value, scale: -3},
+ Format: format,
+ }
+}
+
+// NewScaledQuantity returns a new Quantity representing the given
+// value * 10^scale in DecimalSI format.
+func NewScaledQuantity(value int64, scale Scale) *Quantity {
+ return &Quantity{
+ i: int64Amount{value: value, scale: scale},
+ Format: DecimalSI,
+ }
+}
+
+// Value returns the value of q; any fractional part will be lost.
+func (q *Quantity) Value() int64 {
+ return q.ScaledValue(0)
+}
+
+// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
+// if that's a concern, call Value() first to verify the number is small enough.
+func (q *Quantity) MilliValue() int64 {
+ return q.ScaledValue(Milli)
+}
+
+// ScaledValue returns the value of ceil(q / 10^scale); this could overflow an int64.
+// To detect overflow, call Value() first and verify the expected magnitude.
+func (q *Quantity) ScaledValue(scale Scale) int64 {
+ if q.d.Dec == nil {
+ i, _ := q.i.AsScaledInt64(scale)
+ return i
+ }
+ dec := q.d.Dec
+ return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
+}
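
A sketch of the accessor semantics, assuming the vendored import path: fractional parts round up when dropped, and MaxMilliValue (from math.go above) bounds when MilliValue is safe to use:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("2500m")
	fmt.Println(cpu.MilliValue()) // 2500
	fmt.Println(cpu.Value())      // 3 (the fractional part rounds up)

	mem := resource.MustParse("1Gi")
	// Guard against overflow before asking for milli-units of large values.
	if mem.Value() < resource.MaxMilliValue {
		fmt.Println(mem.MilliValue()) // 1073741824000
	}
}
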
+
+// Set sets q's value to be value.
+func (q *Quantity) Set(value int64) {
+ q.SetScaled(value, 0)
+}
+
+// SetMilli sets q's value to be value * 1/1000.
+func (q *Quantity) SetMilli(value int64) {
+ q.SetScaled(value, Milli)
+}
+
+// SetScaled sets q's value to be value * 10^scale
+func (q *Quantity) SetScaled(value int64, scale Scale) {
+ q.s = ""
+ q.d.Dec = nil
+ q.i = int64Amount{value: value, scale: scale}
+}
+
+// Copy is a convenience function that makes a deep copy for you. Non-deep
+// copies of quantities share pointers and you will regret that.
+func (q *Quantity) Copy() *Quantity {
+ if q.d.Dec == nil {
+ return &Quantity{
+ s: q.s,
+ i: q.i,
+ Format: q.Format,
+ }
+ }
+ tmp := &inf.Dec{}
+ return &Quantity{
+ s: q.s,
+ d: infDecAmount{tmp.Set(q.d.Dec)},
+ Format: q.Format,
+ }
+}
+
+// qFlag is a helper type for the Flag function
+type qFlag struct {
+ dest *Quantity
+}
+
+// Sets the value of the internal Quantity. (used by flag & pflag)
+func (qf qFlag) Set(val string) error {
+ q, err := ParseQuantity(val)
+ if err != nil {
+ return err
+ }
+ // This copy is OK because q will not be referenced again.
+ *qf.dest = q
+ return nil
+}
+
+// Converts the value of the internal Quantity to a string. (used by flag & pflag)
+func (qf qFlag) String() string {
+ return qf.dest.String()
+}
+
+// States the type of flag this is (Quantity). (used by pflag)
+func (qf qFlag) Type() string {
+ return "quantity"
+}
+
+// QuantityFlag is a helper that makes a quantity flag (using standard flag package).
+// Will panic if defaultValue is not a valid quantity.
+func QuantityFlag(flagName, defaultValue, description string) *Quantity {
+ q := MustParse(defaultValue)
+ flag.Var(NewQuantityFlagValue(&q), flagName, description)
+ return &q
+}
+
+// NewQuantityFlagValue returns an object that can be used to back a flag,
+// pointing at the given Quantity variable.
+func NewQuantityFlagValue(q *Quantity) flag.Value {
+ return qFlag{q}
+}
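
The flag helpers register against the vendored github.com/spf13/pflag package; a sketch of how a consumer might wire one up, with the flag name chosen purely for illustration:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// Registers --memory-limit on pflag's default CommandLine flag set,
	// panicking if the default value is not a valid quantity.
	memLimit := resource.QuantityFlag("memory-limit", "512Mi", "maximum memory to use")

	flag.Parse() // e.g. invoked with --memory-limit=1.5Gi
	fmt.Println(memLimit.String())
}
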
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go
new file mode 100644
index 0000000..74dfb4e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go
@@ -0,0 +1,284 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/gogo/protobuf/proto"
+)
+
+var _ proto.Sizer = &Quantity{}
+
+func (m *Quantity) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+// MarshalTo is a customized version of the generated Protobuf marshaler for a struct
+// with a single string field.
+func (m *Quantity) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+
+ data[i] = 0xa
+ i++
+ // BEGIN CUSTOM MARSHAL
+ out := m.String()
+ i = encodeVarintGenerated(data, i, uint64(len(out)))
+ i += copy(data[i:], out)
+ // END CUSTOM MARSHAL
+
+ return i, nil
+}
+
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+
+func (m *Quantity) Size() (n int) {
+ var l int
+ _ = l
+
+ // BEGIN CUSTOM SIZE
+ l = len(m.String())
+ // END CUSTOM SIZE
+
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct
+// with a single string field.
+func (m *Quantity) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Quantity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(data[iNdEx:postIndex])
+
+ // BEGIN CUSTOM DECODE
+ p, err := ParseQuantity(s)
+ if err != nil {
+ return err
+ }
+ *m = p
+ // END CUSTOM DECODE
+
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
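
The custom Marshal/Unmarshal above reduce the message to its canonical string form. A round-trip sketch, assuming the vendored import path:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	q := resource.MustParse("750m")

	data, err := q.Marshal() // field 1, wire type 2, canonical string payload
	if err != nil {
		panic(err)
	}

	var out resource.Quantity
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // 750m
}
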
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go
new file mode 100644
index 0000000..55e177b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "math"
+ "math/big"
+ "sync"
+)
+
+var (
+ // A sync pool to reduce allocation.
+ intPool sync.Pool
+ maxInt64 = big.NewInt(math.MaxInt64)
+)
+
+func init() {
+ intPool.New = func() interface{} {
+ return &big.Int{}
+ }
+}
+
+// scaledValue scales the given unscaled value from scale to newScale and returns
+// an int64. It ALWAYS rounds up the result when scaling down. The final result might
+// overflow.
+//
+// scale, newScale represents the scale of the unscaled decimal.
+// The mathematical value of the decimal is unscaled * 10**(-scale).
+func scaledValue(unscaled *big.Int, scale, newScale int) int64 {
+ dif := scale - newScale
+ if dif == 0 {
+ return unscaled.Int64()
+ }
+
+ // Handle scale up
+ // This is an easy case, we do not need to care about rounding and overflow.
+ // If any intermediate operation causes overflow, the result will overflow.
+ if dif < 0 {
+ return unscaled.Int64() * int64(math.Pow10(-dif))
+ }
+
+ // Handle scale down
+ // We have to be careful about the intermediate operations.
+
+ // fast path when unscaled < max.Int64 and exp(10,dif) < max.Int64
+ const log10MaxInt64 = 19
+ if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 {
+ divide := int64(math.Pow10(dif))
+ result := unscaled.Int64() / divide
+ mod := unscaled.Int64() % divide
+ if mod != 0 {
+ return result + 1
+ }
+ return result
+ }
+
+ // We should only convert back to int64 when getting the result.
+ divisor := intPool.Get().(*big.Int)
+ exp := intPool.Get().(*big.Int)
+ result := intPool.Get().(*big.Int)
+ defer func() {
+ intPool.Put(divisor)
+ intPool.Put(exp)
+ intPool.Put(result)
+ }()
+
+ // divisor = 10^(dif)
+ // TODO: create loop up table if exp costs too much.
+ divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil)
+ // reuse exp
+ remainder := exp
+
+ // result = unscaled / divisor
+ // remainder = unscaled % divisor
+ result.DivMod(unscaled, divisor, remainder)
+ if remainder.Sign() != 0 {
+ return result.Int64() + 1
+ }
+
+ return result.Int64()
+}
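
scaledValue is unexported, but its scale-down branch is ordinary ceiling division with big.Int. A standalone sketch of that step, assuming a non-negative unscaled value:

package main

import (
	"fmt"
	"math/big"
)

// ceilDiv mirrors the slow path of scaledValue above: divide by 10^pow10
// and round any non-zero remainder up.
func ceilDiv(unscaled *big.Int, pow10 int64) int64 {
	divisor := new(big.Int).Exp(big.NewInt(10), big.NewInt(pow10), nil)
	q, r := new(big.Int), new(big.Int)
	q.DivMod(unscaled, divisor, r)
	if r.Sign() != 0 {
		return q.Int64() + 1
	}
	return q.Int64()
}

func main() {
	// 1234 at scale 3 is 1.234; rescaling to whole units rounds up to 2.
	fmt.Println(ceilDiv(big.NewInt(1234), 3)) // 2
	fmt.Println(ceilDiv(big.NewInt(1000), 3)) // 1 (exact, no rounding)
}
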
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go
new file mode 100644
index 0000000..5ed7abe
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "strconv"
+)
+
+type suffix string
+
+// suffixer can interpret and construct suffixes.
+type suffixer interface {
+ interpret(suffix) (base, exponent int32, fmt Format, ok bool)
+ construct(base, exponent int32, fmt Format) (s suffix, ok bool)
+ constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool)
+}
+
+// quantitySuffixer handles suffixes for all three formats that quantity
+// can handle.
+var quantitySuffixer = newSuffixer()
+
+type bePair struct {
+ base, exponent int32
+}
+
+type listSuffixer struct {
+ suffixToBE map[suffix]bePair
+ beToSuffix map[bePair]suffix
+ beToSuffixBytes map[bePair][]byte
+}
+
+func (ls *listSuffixer) addSuffix(s suffix, pair bePair) {
+ if ls.suffixToBE == nil {
+ ls.suffixToBE = map[suffix]bePair{}
+ }
+ if ls.beToSuffix == nil {
+ ls.beToSuffix = map[bePair]suffix{}
+ }
+ if ls.beToSuffixBytes == nil {
+ ls.beToSuffixBytes = map[bePair][]byte{}
+ }
+ ls.suffixToBE[s] = pair
+ ls.beToSuffix[pair] = s
+ ls.beToSuffixBytes[pair] = []byte(s)
+}
+
+func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) {
+ pair, ok := ls.suffixToBE[s]
+ if !ok {
+ return 0, 0, false
+ }
+ return pair.base, pair.exponent, true
+}
+
+func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) {
+ s, ok = ls.beToSuffix[bePair{base, exponent}]
+ return
+}
+
+func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) {
+ s, ok = ls.beToSuffixBytes[bePair{base, exponent}]
+ return
+}
+
+type suffixHandler struct {
+ decSuffixes listSuffixer
+ binSuffixes listSuffixer
+}
+
+type fastLookup struct {
+ *suffixHandler
+}
+
+func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) {
+ switch s {
+ case "":
+ return 10, 0, DecimalSI, true
+ case "n":
+ return 10, -9, DecimalSI, true
+ case "u":
+ return 10, -6, DecimalSI, true
+ case "m":
+ return 10, -3, DecimalSI, true
+ case "k":
+ return 10, 3, DecimalSI, true
+ case "M":
+ return 10, 6, DecimalSI, true
+ case "G":
+ return 10, 9, DecimalSI, true
+ }
+ return l.suffixHandler.interpret(s)
+}
+
+func newSuffixer() suffixer {
+ sh := &suffixHandler{}
+
+ // IMPORTANT: if you change this section you must change fastLookup
+
+ sh.binSuffixes.addSuffix("Ki", bePair{2, 10})
+ sh.binSuffixes.addSuffix("Mi", bePair{2, 20})
+ sh.binSuffixes.addSuffix("Gi", bePair{2, 30})
+ sh.binSuffixes.addSuffix("Ti", bePair{2, 40})
+ sh.binSuffixes.addSuffix("Pi", bePair{2, 50})
+ sh.binSuffixes.addSuffix("Ei", bePair{2, 60})
+ // Don't emit an error when trying to produce
+ // a suffix for 2^0.
+ sh.decSuffixes.addSuffix("", bePair{2, 0})
+
+ sh.decSuffixes.addSuffix("n", bePair{10, -9})
+ sh.decSuffixes.addSuffix("u", bePair{10, -6})
+ sh.decSuffixes.addSuffix("m", bePair{10, -3})
+ sh.decSuffixes.addSuffix("", bePair{10, 0})
+ sh.decSuffixes.addSuffix("k", bePair{10, 3})
+ sh.decSuffixes.addSuffix("M", bePair{10, 6})
+ sh.decSuffixes.addSuffix("G", bePair{10, 9})
+ sh.decSuffixes.addSuffix("T", bePair{10, 12})
+ sh.decSuffixes.addSuffix("P", bePair{10, 15})
+ sh.decSuffixes.addSuffix("E", bePair{10, 18})
+
+ return fastLookup{sh}
+}
+
+func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) {
+ switch fmt {
+ case DecimalSI:
+ return sh.decSuffixes.construct(base, exponent)
+ case BinarySI:
+ return sh.binSuffixes.construct(base, exponent)
+ case DecimalExponent:
+ if base != 10 {
+ return "", false
+ }
+ if exponent == 0 {
+ return "", true
+ }
+ return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true
+ }
+ return "", false
+}
+
+func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) {
+ switch format {
+ case DecimalSI:
+ return sh.decSuffixes.constructBytes(base, exponent)
+ case BinarySI:
+ return sh.binSuffixes.constructBytes(base, exponent)
+ case DecimalExponent:
+ if base != 10 {
+ return nil, false
+ }
+ if exponent == 0 {
+ return nil, true
+ }
+ result := make([]byte, 8, 8)
+ result[0] = 'e'
+ number := strconv.AppendInt(result[1:1], int64(exponent), 10)
+ if &result[1] == &number[0] {
+ return result[:1+len(number)], true
+ }
+ result = append(result[:1], number...)
+ return result, true
+ }
+ return nil, false
+}
+
+func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) {
+ // Try lookup tables first
+ if b, e, ok := sh.decSuffixes.lookup(suffix); ok {
+ return b, e, DecimalSI, true
+ }
+ if b, e, ok := sh.binSuffixes.lookup(suffix); ok {
+ return b, e, BinarySI, true
+ }
+
+ if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') {
+ parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64)
+ if err != nil {
+ return 0, 0, DecimalExponent, false
+ }
+ return 10, int32(parsed), DecimalExponent, true
+ }
+
+ return 0, 0, DecimalExponent, false
+}
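
The suffix tables above surface through parsing; a sketch of how the three suffix families affect the parsed value, assuming the vendored import path:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	fmt.Println(resource.MustParse("1Ki").Value()) // 1024 (binary suffix, base 2)
	fmt.Println(resource.MustParse("1k").Value())  // 1000 (decimal SI suffix, base 10)
	fmt.Println(resource.MustParse("2e3").Value()) // 2000 (decimal exponent)
}
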
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go
new file mode 100644
index 0000000..2c683da
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// Returns string version of ResourceName.
+func (self ResourceName) String() string {
+ return string(self)
+}
+
+// Returns the CPU limit if specified.
+func (self *ResourceList) Cpu() *resource.Quantity {
+ if val, ok := (*self)[ResourceCPU]; ok {
+ return &val
+ }
+ return &resource.Quantity{Format: resource.DecimalSI}
+}
+
+// Returns the Memory limit if specified.
+func (self *ResourceList) Memory() *resource.Quantity {
+ if val, ok := (*self)[ResourceMemory]; ok {
+ return &val
+ }
+ return &resource.Quantity{Format: resource.BinarySI}
+}
+
+func (self *ResourceList) Pods() *resource.Quantity {
+ if val, ok := (*self)[ResourcePods]; ok {
+ return &val
+ }
+ return &resource.Quantity{}
+}
+
+func (self *ResourceList) NvidiaGPU() *resource.Quantity {
+ if val, ok := (*self)[ResourceNvidiaGPU]; ok {
+ return &val
+ }
+ return &resource.Quantity{}
+}
+
+func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) {
+ for i := range statuses {
+ if statuses[i].Name == name {
+ return statuses[i], true
+ }
+ }
+ return ContainerStatus{}, false
+}
+
+func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus {
+ for i := range statuses {
+ if statuses[i].Name == name {
+ return statuses[i]
+ }
+ }
+ return ContainerStatus{}
+}
+
+// IsPodReady returns true if a pod is ready; false otherwise.
+func IsPodReady(pod *Pod) bool {
+ return IsPodReadyConditionTrue(pod.Status)
+}
+
+// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
+func IsPodReadyConditionTrue(status PodStatus) bool {
+ condition := GetPodReadyCondition(status)
+ return condition != nil && condition.Status == ConditionTrue
+}
+
+// Extracts the pod ready condition from the given status and returns that.
+// Returns nil if the condition is not present.
+func GetPodReadyCondition(status PodStatus) *PodCondition {
+ _, condition := GetPodCondition(&status, PodReady)
+ return condition
+}
+
+// GetPodCondition extracts the provided condition from the given status and returns that.
+// Returns -1 and nil if the condition is not present, and the index of the located condition otherwise.
+func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) {
+ if status == nil {
+ return -1, nil
+ }
+ for i := range status.Conditions {
+ if status.Conditions[i].Type == conditionType {
+ return i, &status.Conditions[i]
+ }
+ }
+ return -1, nil
+}
+
+// GetNodeCondition extracts the provided condition from the given status and returns that.
+// Returns -1 and nil if the condition is not present, and the index of the located condition otherwise.
+func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) {
+ if status == nil {
+ return -1, nil
+ }
+ for i := range status.Conditions {
+ if status.Conditions[i].Type == conditionType {
+ return i, &status.Conditions[i]
+ }
+ }
+ return -1, nil
+}
+
+// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
+// status has changed.
+// Returns true if pod condition has changed or has been added.
+func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool {
+ condition.LastTransitionTime = unversioned.Now()
+ // Try to find this pod condition.
+ conditionIndex, oldCondition := GetPodCondition(status, condition.Type)
+
+ if oldCondition == nil {
+ // We are adding new pod condition.
+ status.Conditions = append(status.Conditions, *condition)
+ return true
+ } else {
+ // We are updating an existing condition, so we need to check if it has changed.
+ if condition.Status == oldCondition.Status {
+ condition.LastTransitionTime = oldCondition.LastTransitionTime
+ }
+
+ isEqual := condition.Status == oldCondition.Status &&
+ condition.Reason == oldCondition.Reason &&
+ condition.Message == oldCondition.Message &&
+ condition.LastProbeTime.Equal(oldCondition.LastProbeTime) &&
+ condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)
+
+ status.Conditions[conditionIndex] = *condition
+ // Return true if one of the fields have changed.
+ return !isEqual
+ }
+}
+
+// IsNodeReady returns true if a node is ready; false otherwise.
+func IsNodeReady(node *Node) bool {
+ for _, c := range node.Status.Conditions {
+ if c.Type == NodeReady {
+ return c.Status == ConditionTrue
+ }
+ }
+ return false
+}
+
+// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
+// containers of the pod.
+func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) {
+ reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{}
+ for _, container := range pod.Spec.Containers {
+ for name, quantity := range container.Resources.Requests {
+ if value, ok := reqs[name]; !ok {
+ reqs[name] = *quantity.Copy()
+ } else {
+ value.Add(quantity)
+ reqs[name] = value
+ }
+ }
+ for name, quantity := range container.Resources.Limits {
+ if value, ok := limits[name]; !ok {
+ limits[name] = *quantity.Copy()
+ } else {
+ value.Add(quantity)
+ limits[name] = value
+ }
+ }
+ }
+ // init containers define the minimum of any resource
+ for _, container := range pod.Spec.InitContainers {
+ for name, quantity := range container.Resources.Requests {
+ value, ok := reqs[name]
+ if !ok {
+ reqs[name] = *quantity.Copy()
+ continue
+ }
+ if quantity.Cmp(value) > 0 {
+ reqs[name] = *quantity.Copy()
+ }
+ }
+ for name, quantity := range container.Resources.Limits {
+ value, ok := limits[name]
+ if !ok {
+ limits[name] = *quantity.Copy()
+ continue
+ }
+ if quantity.Cmp(value) > 0 {
+ limits[name] = *quantity.Copy()
+ }
+ }
+ }
+ return
+}
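
A usage sketch for PodRequestsAndLimits, assuming the api.Pod, api.PodSpec, api.Container, api.ResourceRequirements and api.ResourceList types vendored elsewhere in this commit; container names and values are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: "app",
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("250m"),
							api.ResourceMemory: resource.MustParse("64Mi"),
						},
					},
				},
				{
					Name: "sidecar",
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU: resource.MustParse("100m"),
						},
					},
				},
			},
		},
	}

	reqs, _, err := api.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpu := reqs[api.ResourceCPU]
	fmt.Println(cpu.String()) // 350m, summed across containers
}
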
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go
new file mode 100644
index 0000000..ee275d3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+const (
+ // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
+ //
+ // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
+ // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
+ // access only from the CIDRs currently allocated to MIT & the USPS.
+ //
+ // Not all cloud providers support this annotation, though AWS & GCE do.
+ AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/util.go
new file mode 100644
index 0000000..6f0e14e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/service/util.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+ "fmt"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api"
+ netsets "k8s.io/kubernetes/pkg/util/net/sets"
+)
+
+const (
+ defaultLoadBalancerSourceRanges = "0.0.0.0/0"
+)
+
+// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0
+func IsAllowAll(ipnets netsets.IPNet) bool {
+ for _, s := range ipnets.StringSlice() {
+ if s == "0.0.0.0/0" {
+ return true
+ }
+ }
+ return false
+}
+
+// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
+// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation on the service,
+// extracting the source ranges to allow; if neither is present, it returns a default (allow-all) value.
+func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) {
+ var ipnets netsets.IPNet
+ var err error
+ // if SourceRange field is specified, ignore sourceRange annotation
+ if len(service.Spec.LoadBalancerSourceRanges) > 0 {
+ specs := service.Spec.LoadBalancerSourceRanges
+ ipnets, err = netsets.ParseIPNets(specs...)
+
+ if err != nil {
+ return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
+ }
+ } else {
+ val := service.Annotations[AnnotationLoadBalancerSourceRangesKey]
+ val = strings.TrimSpace(val)
+ if val == "" {
+ val = defaultLoadBalancerSourceRanges
+ }
+ specs := strings.Split(val, ",")
+ ipnets, err = netsets.ParseIPNets(specs...)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val)
+ }
+ }
+ return ipnets, nil
+}
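
A usage sketch for the helpers above, assuming the api.Service and api.ObjectMeta types vendored in this commit and the netsets helpers under pkg/util/net/sets; the CIDR values are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/service"
)

func main() {
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{
				service.AnnotationLoadBalancerSourceRangesKey: "10.0.0.0/24,192.168.2.0/24",
			},
		},
	}

	ipnets, err := service.GetLoadBalancerSourceRanges(svc)
	if err != nil {
		panic(err)
	}
	fmt.Println(ipnets.StringSlice())       // the two parsed CIDRs
	fmt.Println(service.IsAllowAll(ipnets)) // false: 0.0.0.0/0 is not included
}
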
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.generated.go
new file mode 100644
index 0000000..67d3dbc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.generated.go
@@ -0,0 +1,59756 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package api
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg3_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg6_fields "k8s.io/kubernetes/pkg/fields"
+ pkg5_labels "k8s.io/kubernetes/pkg/labels"
+ pkg7_runtime "k8s.io/kubernetes/pkg/runtime"
+ pkg1_types "k8s.io/kubernetes/pkg/types"
+ pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg3_resource.Quantity
+ var v1 pkg2_unversioned.Time
+ var v2 pkg6_fields.Selector
+ var v3 pkg5_labels.Selector
+ var v4 pkg7_runtime.Object
+ var v5 pkg1_types.UID
+ var v6 pkg4_intstr.IntOrString
+ var v7 time.Time
+ _, _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6, v7
+ }
+}
+
+func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ yyq2[1] = x.GenerateName != ""
+ yyq2[2] = x.Namespace != ""
+ yyq2[3] = x.SelfLink != ""
+ yyq2[4] = x.UID != ""
+ yyq2[5] = x.ResourceVersion != ""
+ yyq2[6] = x.Generation != 0
+ yyq2[7] = true
+ yyq2[8] = x.DeletionTimestamp != nil
+ yyq2[9] = x.DeletionGracePeriodSeconds != nil
+ yyq2[10] = len(x.Labels) != 0
+ yyq2[11] = len(x.Annotations) != 0
+ yyq2[12] = len(x.OwnerReferences) != 0
+ yyq2[13] = len(x.Finalizers) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("generateName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selfLink"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Generation))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("generation"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Generation))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yy25 := &x.CreationTimestamp
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy25) {
+ } else if yym26 {
+ z.EncBinaryMarshal(yy25)
+ } else if !yym26 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy25)
+ } else {
+ z.EncFallback(yy25)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy27 := &x.CreationTimestamp
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy27) {
+ } else if yym28 {
+ z.EncBinaryMarshal(yy27)
+ } else if !yym28 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy27)
+ } else {
+ z.EncFallback(yy27)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.DeletionTimestamp == nil {
+ r.EncodeNil()
+ } else {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) {
+ } else if yym30 {
+ z.EncBinaryMarshal(x.DeletionTimestamp)
+ } else if !yym30 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.DeletionTimestamp)
+ } else {
+ z.EncFallback(x.DeletionTimestamp)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DeletionTimestamp == nil {
+ r.EncodeNil()
+ } else {
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) {
+ } else if yym31 {
+ z.EncBinaryMarshal(x.DeletionTimestamp)
+ } else if !yym31 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.DeletionTimestamp)
+ } else {
+ z.EncFallback(x.DeletionTimestamp)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.DeletionGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy33 := *x.DeletionGracePeriodSeconds
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeInt(int64(yy33))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DeletionGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy35 := *x.DeletionGracePeriodSeconds
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeInt(int64(yy35))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.Labels == nil {
+ r.EncodeNil()
+ } else {
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Labels, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("labels"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Labels == nil {
+ r.EncodeNil()
+ } else {
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Labels, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.Annotations == nil {
+ r.EncodeNil()
+ } else {
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Annotations, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("annotations"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Annotations == nil {
+ r.EncodeNil()
+ } else {
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Annotations, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.OwnerReferences == nil {
+ r.EncodeNil()
+ } else {
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ownerReferences"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.OwnerReferences == nil {
+ r.EncodeNil()
+ } else {
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Finalizers, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("finalizers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym48 := z.EncBinary()
+ _ = yym48
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Finalizers, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "generateName":
+ if r.TryDecodeAsNil() {
+ x.GenerateName = ""
+ } else {
+ x.GenerateName = string(r.DecodeString())
+ }
+ case "namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ case "selfLink":
+ if r.TryDecodeAsNil() {
+ x.SelfLink = ""
+ } else {
+ x.SelfLink = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ case "resourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "generation":
+ if r.TryDecodeAsNil() {
+ x.Generation = 0
+ } else {
+ x.Generation = int64(r.DecodeInt(64))
+ }
+ case "creationTimestamp":
+ if r.TryDecodeAsNil() {
+ x.CreationTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv11 := &x.CreationTimestamp
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(yyv11)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ case "deletionTimestamp":
+ if r.TryDecodeAsNil() {
+ if x.DeletionTimestamp != nil {
+ x.DeletionTimestamp = nil
+ }
+ } else {
+ if x.DeletionTimestamp == nil {
+ x.DeletionTimestamp = new(pkg2_unversioned.Time)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) {
+ } else if yym14 {
+ z.DecBinaryUnmarshal(x.DeletionTimestamp)
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.DeletionTimestamp)
+ } else {
+ z.DecFallback(x.DeletionTimestamp, false)
+ }
+ }
+ case "deletionGracePeriodSeconds":
+ if r.TryDecodeAsNil() {
+ if x.DeletionGracePeriodSeconds != nil {
+ x.DeletionGracePeriodSeconds = nil
+ }
+ } else {
+ if x.DeletionGracePeriodSeconds == nil {
+ x.DeletionGracePeriodSeconds = new(int64)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "labels":
+ if r.TryDecodeAsNil() {
+ x.Labels = nil
+ } else {
+ yyv17 := &x.Labels
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv17, false, d)
+ }
+ }
+ case "annotations":
+ if r.TryDecodeAsNil() {
+ x.Annotations = nil
+ } else {
+ yyv19 := &x.Annotations
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv19, false, d)
+ }
+ }
+ case "ownerReferences":
+ if r.TryDecodeAsNil() {
+ x.OwnerReferences = nil
+ } else {
+ yyv21 := &x.OwnerReferences
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ h.decSliceOwnerReference((*[]OwnerReference)(yyv21), d)
+ }
+ }
+ case "finalizers":
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv23 := &x.Finalizers
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv23, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj25 int
+ var yyb25 bool
+ var yyhl25 bool = l >= 0
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.GenerateName = ""
+ } else {
+ x.GenerateName = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SelfLink = ""
+ } else {
+ x.SelfLink = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Generation = 0
+ } else {
+ x.Generation = int64(r.DecodeInt(64))
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CreationTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv33 := &x.CreationTimestamp
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv33) {
+ } else if yym34 {
+ z.DecBinaryUnmarshal(yyv33)
+ } else if !yym34 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv33)
+ } else {
+ z.DecFallback(yyv33, false)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DeletionTimestamp != nil {
+ x.DeletionTimestamp = nil
+ }
+ } else {
+ if x.DeletionTimestamp == nil {
+ x.DeletionTimestamp = new(pkg2_unversioned.Time)
+ }
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) {
+ } else if yym36 {
+ z.DecBinaryUnmarshal(x.DeletionTimestamp)
+ } else if !yym36 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.DeletionTimestamp)
+ } else {
+ z.DecFallback(x.DeletionTimestamp, false)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DeletionGracePeriodSeconds != nil {
+ x.DeletionGracePeriodSeconds = nil
+ }
+ } else {
+ if x.DeletionGracePeriodSeconds == nil {
+ x.DeletionGracePeriodSeconds = new(int64)
+ }
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else {
+ *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Labels = nil
+ } else {
+ yyv39 := &x.Labels
+ yym40 := z.DecBinary()
+ _ = yym40
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv39, false, d)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Annotations = nil
+ } else {
+ yyv41 := &x.Annotations
+ yym42 := z.DecBinary()
+ _ = yym42
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv41, false, d)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OwnerReferences = nil
+ } else {
+ yyv43 := &x.OwnerReferences
+ yym44 := z.DecBinary()
+ _ = yym44
+ if false {
+ } else {
+ h.decSliceOwnerReference((*[]OwnerReference)(yyv43), d)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv45 := &x.Finalizers
+ yym46 := z.DecBinary()
+ _ = yym46
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv45, false, d)
+ }
+ }
+ for {
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj25-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
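
These generated ObjectMeta methods implement the ugorji codec's Selfer interface, so callers never invoke them directly; the codec library dispatches to CodecEncodeSelf/CodecDecodeSelf whenever an ObjectMeta is encoded or decoded. A minimal round-trip sketch, with the JSON handle chosen purely for illustration:

// Round-trip an ObjectMeta through github.com/ugorji/go/codec; the generated
// CodecEncodeSelf/CodecDecodeSelf above are called implicitly via codec.Selfer.
package main

import (
	"fmt"

	codec "github.com/ugorji/go/codec"
	"k8s.io/kubernetes/pkg/api"
)

func roundTripObjectMeta(in api.ObjectMeta) (api.ObjectMeta, error) {
	var buf []byte
	handle := &codec.JsonHandle{} // handle choice is illustrative

	if err := codec.NewEncoderBytes(&buf, handle).Encode(&in); err != nil {
		return api.ObjectMeta{}, fmt.Errorf("encode ObjectMeta: %v", err)
	}

	var out api.ObjectMeta
	if err := codec.NewDecoderBytes(buf, handle).Decode(&out); err != nil {
		return api.ObjectMeta{}, fmt.Errorf("decode ObjectMeta: %v", err)
	}
	return out, nil
}
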
+
+func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [21]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil
+ yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil
+ yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil
+ yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil
+ yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil
+ yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil
+ yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil
+ yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil
+ yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil
+ yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil
+ yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil
+ yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil
+ yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil
+ yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil
+ yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil
+ yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil
+ yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil
+ yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil
+ yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil
+ yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(21)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ var yyn6 bool
+ if x.VolumeSource.HostPath == nil {
+ yyn6 = true
+ goto LABEL6
+ }
+ LABEL6:
+ if yyr2 || yy2arr2 {
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn9 bool
+ if x.VolumeSource.EmptyDir == nil {
+ yyn9 = true
+ goto LABEL9
+ }
+ LABEL9:
+ if yyr2 || yy2arr2 {
+ if yyn9 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("emptyDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn9 {
+ r.EncodeNil()
+ } else {
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn12 bool
+ if x.VolumeSource.GCEPersistentDisk == nil {
+ yyn12 = true
+ goto LABEL12
+ }
+ LABEL12:
+ if yyr2 || yy2arr2 {
+ if yyn12 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn12 {
+ r.EncodeNil()
+ } else {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn15 bool
+ if x.VolumeSource.AWSElasticBlockStore == nil {
+ yyn15 = true
+ goto LABEL15
+ }
+ LABEL15:
+ if yyr2 || yy2arr2 {
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn18 bool
+ if x.VolumeSource.GitRepo == nil {
+ yyn18 = true
+ goto LABEL18
+ }
+ LABEL18:
+ if yyr2 || yy2arr2 {
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gitRepo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn21 bool
+ if x.VolumeSource.Secret == nil {
+ yyn21 = true
+ goto LABEL21
+ }
+ LABEL21:
+ if yyr2 || yy2arr2 {
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secret"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn24 bool
+ if x.VolumeSource.NFS == nil {
+ yyn24 = true
+ goto LABEL24
+ }
+ LABEL24:
+ if yyr2 || yy2arr2 {
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn27 bool
+ if x.VolumeSource.ISCSI == nil {
+ yyn27 = true
+ goto LABEL27
+ }
+ LABEL27:
+ if yyr2 || yy2arr2 {
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn30 bool
+ if x.VolumeSource.Glusterfs == nil {
+ yyn30 = true
+ goto LABEL30
+ }
+ LABEL30:
+ if yyr2 || yy2arr2 {
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn33 bool
+ if x.VolumeSource.PersistentVolumeClaim == nil {
+ yyn33 = true
+ goto LABEL33
+ }
+ LABEL33:
+ if yyr2 || yy2arr2 {
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn36 bool
+ if x.VolumeSource.RBD == nil {
+ yyn36 = true
+ goto LABEL36
+ }
+ LABEL36:
+ if yyr2 || yy2arr2 {
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn39 bool
+ if x.VolumeSource.FlexVolume == nil {
+ yyn39 = true
+ goto LABEL39
+ }
+ LABEL39:
+ if yyr2 || yy2arr2 {
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn42 bool
+ if x.VolumeSource.Cinder == nil {
+ yyn42 = true
+ goto LABEL42
+ }
+ LABEL42:
+ if yyr2 || yy2arr2 {
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn45 bool
+ if x.VolumeSource.CephFS == nil {
+ yyn45 = true
+ goto LABEL45
+ }
+ LABEL45:
+ if yyr2 || yy2arr2 {
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn48 bool
+ if x.VolumeSource.Flocker == nil {
+ yyn48 = true
+ goto LABEL48
+ }
+ LABEL48:
+ if yyr2 || yy2arr2 {
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn51 bool
+ if x.VolumeSource.DownwardAPI == nil {
+ yyn51 = true
+ goto LABEL51
+ }
+ LABEL51:
+ if yyr2 || yy2arr2 {
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("downwardAPI"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn54 bool
+ if x.VolumeSource.FC == nil {
+ yyn54 = true
+ goto LABEL54
+ }
+ LABEL54:
+ if yyr2 || yy2arr2 {
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn57 bool
+ if x.VolumeSource.AzureFile == nil {
+ yyn57 = true
+ goto LABEL57
+ }
+ LABEL57:
+ if yyr2 || yy2arr2 {
+ if yyn57 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[18] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[18] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn57 {
+ r.EncodeNil()
+ } else {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn60 bool
+ if x.VolumeSource.ConfigMap == nil {
+ yyn60 = true
+ goto LABEL60
+ }
+ LABEL60:
+ if yyr2 || yy2arr2 {
+ if yyn60 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[19] {
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[19] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configMap"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn60 {
+ r.EncodeNil()
+ } else {
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn63 bool
+ if x.VolumeSource.VsphereVolume == nil {
+ yyn63 = true
+ goto LABEL63
+ }
+ LABEL63:
+ if yyr2 || yy2arr2 {
+ if yyn63 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[20] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[20] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn63 {
+ r.EncodeNil()
+ } else {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "hostPath":
+ if x.VolumeSource.HostPath == nil {
+ x.VolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "emptyDir":
+ if x.VolumeSource.EmptyDir == nil {
+ x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ case "gcePersistentDisk":
+ if x.VolumeSource.GCEPersistentDisk == nil {
+ x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if x.VolumeSource.AWSElasticBlockStore == nil {
+ x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "gitRepo":
+ if x.VolumeSource.GitRepo == nil {
+ x.VolumeSource.GitRepo = new(GitRepoVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ case "secret":
+ if x.VolumeSource.Secret == nil {
+ x.VolumeSource.Secret = new(SecretVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if x.VolumeSource.NFS == nil {
+ x.VolumeSource.NFS = new(NFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if x.VolumeSource.ISCSI == nil {
+ x.VolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if x.VolumeSource.Glusterfs == nil {
+ x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "persistentVolumeClaim":
+ if x.VolumeSource.PersistentVolumeClaim == nil {
+ x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if x.VolumeSource.RBD == nil {
+ x.VolumeSource.RBD = new(RBDVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if x.VolumeSource.FlexVolume == nil {
+ x.VolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if x.VolumeSource.Cinder == nil {
+ x.VolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if x.VolumeSource.CephFS == nil {
+ x.VolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if x.VolumeSource.Flocker == nil {
+ x.VolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "downwardAPI":
+ if x.VolumeSource.DownwardAPI == nil {
+ x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if x.VolumeSource.FC == nil {
+ x.VolumeSource.FC = new(FCVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if x.VolumeSource.AzureFile == nil {
+ x.VolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "configMap":
+ if x.VolumeSource.ConfigMap == nil {
+ x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if x.VolumeSource.VsphereVolume == nil {
+ x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj25 int
+ var yyb25 bool
+ var yyhl25 bool = l >= 0
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ if x.VolumeSource.HostPath == nil {
+ x.VolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.EmptyDir == nil {
+ x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.GCEPersistentDisk == nil {
+ x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.AWSElasticBlockStore == nil {
+ x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.GitRepo == nil {
+ x.VolumeSource.GitRepo = new(GitRepoVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Secret == nil {
+ x.VolumeSource.Secret = new(SecretVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.NFS == nil {
+ x.VolumeSource.NFS = new(NFSVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.ISCSI == nil {
+ x.VolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Glusterfs == nil {
+ x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.PersistentVolumeClaim == nil {
+ x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.RBD == nil {
+ x.VolumeSource.RBD = new(RBDVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.FlexVolume == nil {
+ x.VolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Cinder == nil {
+ x.VolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.CephFS == nil {
+ x.VolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Flocker == nil {
+ x.VolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.DownwardAPI == nil {
+ x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.FC == nil {
+ x.VolumeSource.FC = new(FCVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.AzureFile == nil {
+ x.VolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.ConfigMap == nil {
+ x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.VsphereVolume == nil {
+ x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj25-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [20]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.HostPath != nil
+ yyq2[1] = x.EmptyDir != nil
+ yyq2[2] = x.GCEPersistentDisk != nil
+ yyq2[3] = x.AWSElasticBlockStore != nil
+ yyq2[4] = x.GitRepo != nil
+ yyq2[5] = x.Secret != nil
+ yyq2[6] = x.NFS != nil
+ yyq2[7] = x.ISCSI != nil
+ yyq2[8] = x.Glusterfs != nil
+ yyq2[9] = x.PersistentVolumeClaim != nil
+ yyq2[10] = x.RBD != nil
+ yyq2[11] = x.FlexVolume != nil
+ yyq2[12] = x.Cinder != nil
+ yyq2[13] = x.CephFS != nil
+ yyq2[14] = x.Flocker != nil
+ yyq2[15] = x.DownwardAPI != nil
+ yyq2[16] = x.FC != nil
+ yyq2[17] = x.AzureFile != nil
+ yyq2[18] = x.ConfigMap != nil
+ yyq2[19] = x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(20)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("emptyDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gitRepo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secret"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("downwardAPI"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[18] {
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[18] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configMap"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[19] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[19] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hostPath":
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "emptyDir":
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ case "gcePersistentDisk":
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "gitRepo":
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ case "secret":
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "persistentVolumeClaim":
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "downwardAPI":
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "configMap":
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj24 int
+ var yyb24 bool
+ var yyhl24 bool = l >= 0
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj24-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.GCEPersistentDisk != nil
+ yyq2[1] = x.AWSElasticBlockStore != nil
+ yyq2[2] = x.HostPath != nil
+ yyq2[3] = x.Glusterfs != nil
+ yyq2[4] = x.NFS != nil
+ yyq2[5] = x.RBD != nil
+ yyq2[6] = x.ISCSI != nil
+ yyq2[7] = x.FlexVolume != nil
+ yyq2[8] = x.Cinder != nil
+ yyq2[9] = x.CephFS != nil
+ yyq2[10] = x.FC != nil
+ yyq2[11] = x.Flocker != nil
+ yyq2[12] = x.AzureFile != nil
+ yyq2[13] = x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "gcePersistentDisk":
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "hostPath":
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj18 int
+ var yyb18 bool
+ var yyhl18 bool = l >= 0
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj18-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("claimName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "claimName":
+ if r.TryDecodeAsNil() {
+ x.ClaimName = ""
+ } else {
+ x.ClaimName = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClaimName = ""
+ } else {
+ x.ClaimName = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
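+// CodecDecodeSelf decodes a PersistentVolume from either a map or an array
+// container, delegating to codecDecodeSelfFromMap or codecDecodeSelfFromArray and
+// panicking on any other container type.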
+func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
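+// CodecEncodeSelf encodes a PersistentVolumeSpec: capacity is always written, while
+// accessModes, claimRef, persistentVolumeReclaimPolicy and each optional volume
+// source field are written only when set.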
+func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [18]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.AccessModes) != 0
+ yyq2[2] = x.ClaimRef != nil
+ yyq2[3] = x.PersistentVolumeReclaimPolicy != ""
+ yyq2[4] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil
+ yyq2[5] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil
+ yyq2[6] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil
+ yyq2[7] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil
+ yyq2[8] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil
+ yyq2[9] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil
+ yyq2[10] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil
+ yyq2[11] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil
+ yyq2[12] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil
+ yyq2[13] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil
+ yyq2[14] = x.PersistentVolumeSource.FC != nil && x.FC != nil
+ yyq2[15] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil
+ yyq2[16] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil
+ yyq2[17] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(18)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("accessModes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ClaimRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ClaimRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("claimRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ClaimRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ClaimRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e)
+ }
+ }
+ var yyn15 bool
+ if x.PersistentVolumeSource.GCEPersistentDisk == nil {
+ yyn15 = true
+ goto LABEL15
+ }
+ LABEL15:
+ if yyr2 || yy2arr2 {
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn18 bool
+ if x.PersistentVolumeSource.AWSElasticBlockStore == nil {
+ yyn18 = true
+ goto LABEL18
+ }
+ LABEL18:
+ if yyr2 || yy2arr2 {
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn21 bool
+ if x.PersistentVolumeSource.HostPath == nil {
+ yyn21 = true
+ goto LABEL21
+ }
+ LABEL21:
+ if yyr2 || yy2arr2 {
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn24 bool
+ if x.PersistentVolumeSource.Glusterfs == nil {
+ yyn24 = true
+ goto LABEL24
+ }
+ LABEL24:
+ if yyr2 || yy2arr2 {
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn27 bool
+ if x.PersistentVolumeSource.NFS == nil {
+ yyn27 = true
+ goto LABEL27
+ }
+ LABEL27:
+ if yyr2 || yy2arr2 {
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn30 bool
+ if x.PersistentVolumeSource.RBD == nil {
+ yyn30 = true
+ goto LABEL30
+ }
+ LABEL30:
+ if yyr2 || yy2arr2 {
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn33 bool
+ if x.PersistentVolumeSource.ISCSI == nil {
+ yyn33 = true
+ goto LABEL33
+ }
+ LABEL33:
+ if yyr2 || yy2arr2 {
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn36 bool
+ if x.PersistentVolumeSource.FlexVolume == nil {
+ yyn36 = true
+ goto LABEL36
+ }
+ LABEL36:
+ if yyr2 || yy2arr2 {
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn39 bool
+ if x.PersistentVolumeSource.Cinder == nil {
+ yyn39 = true
+ goto LABEL39
+ }
+ LABEL39:
+ if yyr2 || yy2arr2 {
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn42 bool
+ if x.PersistentVolumeSource.CephFS == nil {
+ yyn42 = true
+ goto LABEL42
+ }
+ LABEL42:
+ if yyr2 || yy2arr2 {
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn45 bool
+ if x.PersistentVolumeSource.FC == nil {
+ yyn45 = true
+ goto LABEL45
+ }
+ LABEL45:
+ if yyr2 || yy2arr2 {
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn48 bool
+ if x.PersistentVolumeSource.Flocker == nil {
+ yyn48 = true
+ goto LABEL48
+ }
+ LABEL48:
+ if yyr2 || yy2arr2 {
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn51 bool
+ if x.PersistentVolumeSource.AzureFile == nil {
+ yyn51 = true
+ goto LABEL51
+ }
+ LABEL51:
+ if yyr2 || yy2arr2 {
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn54 bool
+ if x.PersistentVolumeSource.VsphereVolume == nil {
+ yyn54 = true
+ goto LABEL54
+ }
+ LABEL54:
+ if yyr2 || yy2arr2 {
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
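+// codecDecodeSelfFromMap decodes PersistentVolumeSpec fields by key, allocating the
+// matching volume-source pointer before decoding into it and resetting it to nil
+// when the encoded value is nil.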
+func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv4 := &x.Capacity
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "accessModes":
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv5 := &x.AccessModes
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d)
+ }
+ }
+ case "claimRef":
+ if r.TryDecodeAsNil() {
+ if x.ClaimRef != nil {
+ x.ClaimRef = nil
+ }
+ } else {
+ if x.ClaimRef == nil {
+ x.ClaimRef = new(ObjectReference)
+ }
+ x.ClaimRef.CodecDecodeSelf(d)
+ }
+ case "persistentVolumeReclaimPolicy":
+ if r.TryDecodeAsNil() {
+ x.PersistentVolumeReclaimPolicy = ""
+ } else {
+ x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString())
+ }
+ case "gcePersistentDisk":
+ if x.PersistentVolumeSource.GCEPersistentDisk == nil {
+ x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if x.PersistentVolumeSource.AWSElasticBlockStore == nil {
+ x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "hostPath":
+ if x.PersistentVolumeSource.HostPath == nil {
+ x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if x.PersistentVolumeSource.Glusterfs == nil {
+ x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if x.PersistentVolumeSource.NFS == nil {
+ x.PersistentVolumeSource.NFS = new(NFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if x.PersistentVolumeSource.RBD == nil {
+ x.PersistentVolumeSource.RBD = new(RBDVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if x.PersistentVolumeSource.ISCSI == nil {
+ x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if x.PersistentVolumeSource.FlexVolume == nil {
+ x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if x.PersistentVolumeSource.Cinder == nil {
+ x.PersistentVolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if x.PersistentVolumeSource.CephFS == nil {
+ x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if x.PersistentVolumeSource.FC == nil {
+ x.PersistentVolumeSource.FC = new(FCVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if x.PersistentVolumeSource.Flocker == nil {
+ x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if x.PersistentVolumeSource.AzureFile == nil {
+ x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if x.PersistentVolumeSource.VsphereVolume == nil {
+ x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj23 int
+ var yyb23 bool
+ var yyhl23 bool = l >= 0
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv24 := &x.Capacity
+ yyv24.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv25 := &x.AccessModes
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv25), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ClaimRef != nil {
+ x.ClaimRef = nil
+ }
+ } else {
+ if x.ClaimRef == nil {
+ x.ClaimRef = new(ObjectReference)
+ }
+ x.ClaimRef.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PersistentVolumeReclaimPolicy = ""
+ } else {
+ x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString())
+ }
+ if x.PersistentVolumeSource.GCEPersistentDisk == nil {
+ x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.AWSElasticBlockStore == nil {
+ x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.HostPath == nil {
+ x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.Glusterfs == nil {
+ x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.NFS == nil {
+ x.PersistentVolumeSource.NFS = new(NFSVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.RBD == nil {
+ x.PersistentVolumeSource.RBD = new(RBDVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.ISCSI == nil {
+ x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.FlexVolume == nil {
+ x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.Cinder == nil {
+ x.PersistentVolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.CephFS == nil {
+ x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.FC == nil {
+ x.PersistentVolumeSource.FC = new(FCVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.Flocker == nil {
+ x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.AzureFile == nil {
+ x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.VsphereVolume == nil {
+ x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj23-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
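+// PersistentVolumeReclaimPolicy is encoded and decoded as a plain UTF-8 string.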
+func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
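+// CodecEncodeSelf encodes a PersistentVolumeStatus; phase, message and reason are
+// omitted from the map form when empty.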
+func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ yyq2[1] = x.Message != ""
+ yyq2[2] = x.Reason != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumePhase(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumePhase(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
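+// CodecEncodeSelf encodes a PersistentVolumeList: metadata and items are always
+// written, kind and apiVersion only when non-empty.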
+func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePersistentVolume((*[]PersistentVolume)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePersistentVolume((*[]PersistentVolume)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
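+// CodecEncodeSelf encodes a PersistentVolumeClaim with the same layout as
+// PersistentVolume: metadata, spec and status always, kind and apiVersion only when
+// non-empty.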
+func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeClaimSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeClaimStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeClaimSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeClaimStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.AccessModes) != 0
+ yyq2[1] = x.Selector != nil
+ yyq2[2] = true
+ yyq2[3] = x.VolumeName != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("accessModes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.Resources
+ yy10.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Resources
+ yy12.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "accessModes":
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv4 := &x.AccessModes
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d)
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "resources":
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv8 := &x.Resources
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "volumeName":
+ if r.TryDecodeAsNil() {
+ x.VolumeName = ""
+ } else {
+ x.VolumeName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv11 := &x.AccessModes
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv11), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv15 := &x.Resources
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeName = ""
+ } else {
+ x.VolumeName = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ yyq2[1] = len(x.AccessModes) != 0
+ yyq2[2] = len(x.Capacity) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("accessModes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumeClaimPhase(r.DecodeString())
+ }
+ case "accessModes":
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv5 := &x.AccessModes
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d)
+ }
+ }
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv7 := &x.Capacity
+ yyv7.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumeClaimPhase(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv10 := &x.AccessModes
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv12 := &x.Capacity
+ yyv12.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Medium != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Medium.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("medium"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Medium.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "medium":
+ if r.TryDecodeAsNil() {
+ x.Medium = ""
+ } else {
+ x.Medium = StorageMedium(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Medium = ""
+ } else {
+ x.Medium = StorageMedium(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.Partition != 0
+ yyq2[3] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PDName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("pdName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PDName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("partition"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "pdName":
+ if r.TryDecodeAsNil() {
+ x.PDName = ""
+ } else {
+ x.PDName = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "partition":
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PDName = ""
+ } else {
+ x.PDName = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.TargetPortal != ""
+ yyq2[1] = x.IQN != ""
+ yyq2[2] = x.Lun != 0
+ yyq2[3] = x.ISCSIInterface != ""
+ yyq2[4] = x.FSType != ""
+ yyq2[5] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetPortal"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IQN))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iqn"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IQN))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Lun))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lun"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Lun))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "targetPortal":
+ if r.TryDecodeAsNil() {
+ x.TargetPortal = ""
+ } else {
+ x.TargetPortal = string(r.DecodeString())
+ }
+ case "iqn":
+ if r.TryDecodeAsNil() {
+ x.IQN = ""
+ } else {
+ x.IQN = string(r.DecodeString())
+ }
+ case "lun":
+ if r.TryDecodeAsNil() {
+ x.Lun = 0
+ } else {
+ x.Lun = int32(r.DecodeInt(32))
+ }
+ case "iscsiInterface":
+ if r.TryDecodeAsNil() {
+ x.ISCSIInterface = ""
+ } else {
+ x.ISCSIInterface = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetPortal = ""
+ } else {
+ x.TargetPortal = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IQN = ""
+ } else {
+ x.IQN = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Lun = 0
+ } else {
+ x.Lun = int32(r.DecodeInt(32))
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ISCSIInterface = ""
+ } else {
+ x.ISCSIInterface = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.FSType != ""
+ yyq2[3] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.TargetWWNs == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.TargetWWNs, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetWWNs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TargetWWNs == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.TargetWWNs, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Lun == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.Lun
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(yy7))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lun"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Lun == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Lun
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "targetWWNs":
+ if r.TryDecodeAsNil() {
+ x.TargetWWNs = nil
+ } else {
+ yyv4 := &x.TargetWWNs
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "lun":
+ if r.TryDecodeAsNil() {
+ if x.Lun != nil {
+ x.Lun = nil
+ }
+ } else {
+ if x.Lun == nil {
+ x.Lun = new(int32)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int32)(x.Lun)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetWWNs = nil
+ } else {
+ yyv11 := &x.TargetWWNs
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Lun != nil {
+ x.Lun = nil
+ }
+ } else {
+ if x.Lun == nil {
+ x.Lun = new(int32)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int32)(x.Lun)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.SecretRef != nil
+ yyq2[3] = x.ReadOnly != false
+ yyq2[4] = len(x.Options) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Driver))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("driver"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Driver))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Options == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Options, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("options"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Options == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Options, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "driver":
+ if r.TryDecodeAsNil() {
+ x.Driver = ""
+ } else {
+ x.Driver = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "secretRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ case "options":
+ if r.TryDecodeAsNil() {
+ x.Options = nil
+ } else {
+ yyv8 := &x.Options
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv8, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Driver = ""
+ } else {
+ x.Driver = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Options = nil
+ } else {
+ yyv15 := &x.Options
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv15, false, d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.Partition != 0
+ yyq2[3] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("partition"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumeID":
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "partition":
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Revision != ""
+ yyq2[2] = x.Directory != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Repository))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("repository"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Repository))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Revision))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("revision"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Revision))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Directory))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("directory"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Directory))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "repository":
+ if r.TryDecodeAsNil() {
+ x.Repository = ""
+ } else {
+ x.Repository = string(r.DecodeString())
+ }
+ case "revision":
+ if r.TryDecodeAsNil() {
+ x.Revision = ""
+ } else {
+ x.Revision = string(r.DecodeString())
+ }
+ case "directory":
+ if r.TryDecodeAsNil() {
+ x.Directory = ""
+ } else {
+ x.Directory = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Repository = ""
+ } else {
+ x.Repository = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Revision = ""
+ } else {
+ x.Revision = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Directory = ""
+ } else {
+ x.Directory = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.SecretName != ""
+ yyq2[1] = len(x.Items) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "secretName":
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv5 := &x.Items
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv9 := &x.Items
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Server))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("server"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Server))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "server":
+ if r.TryDecodeAsNil() {
+ x.Server = ""
+ } else {
+ x.Server = string(r.DecodeString())
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Server = ""
+ } else {
+ x.Server = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("endpoints"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "endpoints":
+ if r.TryDecodeAsNil() {
+ x.EndpointsName = ""
+ } else {
+ x.EndpointsName = string(r.DecodeString())
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EndpointsName = ""
+ } else {
+ x.EndpointsName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.FSType != ""
+ yyq2[3] = x.RBDPool != ""
+ yyq2[4] = x.RadosUser != ""
+ yyq2[5] = x.Keyring != ""
+ yyq2[6] = x.SecretRef != nil
+ yyq2[7] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.CephMonitors == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.CephMonitors, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("monitors"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CephMonitors == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.CephMonitors, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("pool"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Keyring))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("keyring"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Keyring))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "monitors":
+ if r.TryDecodeAsNil() {
+ x.CephMonitors = nil
+ } else {
+ yyv4 := &x.CephMonitors
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "image":
+ if r.TryDecodeAsNil() {
+ x.RBDImage = ""
+ } else {
+ x.RBDImage = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "pool":
+ if r.TryDecodeAsNil() {
+ x.RBDPool = ""
+ } else {
+ x.RBDPool = string(r.DecodeString())
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.RadosUser = ""
+ } else {
+ x.RadosUser = string(r.DecodeString())
+ }
+ case "keyring":
+ if r.TryDecodeAsNil() {
+ x.Keyring = ""
+ } else {
+ x.Keyring = string(r.DecodeString())
+ }
+ case "secretRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CephMonitors = nil
+ } else {
+ yyv14 := &x.CephMonitors
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv14, false, d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RBDImage = ""
+ } else {
+ x.RBDImage = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RBDPool = ""
+ } else {
+ x.RBDPool = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RadosUser = ""
+ } else {
+ x.RadosUser = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Keyring = ""
+ } else {
+ x.Keyring = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumeID":
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Path != ""
+ yyq2[2] = x.User != ""
+ yyq2[3] = x.SecretFile != ""
+ yyq2[4] = x.SecretRef != nil
+ yyq2[5] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Monitors == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Monitors, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("monitors"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Monitors == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Monitors, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "monitors":
+ if r.TryDecodeAsNil() {
+ x.Monitors = nil
+ } else {
+ yyv4 := &x.Monitors
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ case "secretFile":
+ if r.TryDecodeAsNil() {
+ x.SecretFile = ""
+ } else {
+ x.SecretFile = string(r.DecodeString())
+ }
+ case "secretRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Monitors = nil
+ } else {
+ yyv12 := &x.Monitors
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv12, false, d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretFile = ""
+ } else {
+ x.SecretFile = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("datasetName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "datasetName":
+ if r.TryDecodeAsNil() {
+ x.DatasetName = ""
+ } else {
+ x.DatasetName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DatasetName = ""
+ } else {
+ x.DatasetName = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Items) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv7 := &x.Items
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FieldRef != nil
+ yyq2[2] = x.ResourceFieldRef != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "fieldRef":
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ case "resourceFieldRef":
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ShareName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("shareName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ShareName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "secretName":
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ case "shareName":
+ if r.TryDecodeAsNil() {
+ x.ShareName = ""
+ } else {
+ x.ShareName = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ShareName = ""
+ } else {
+ x.ShareName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumePath":
+ if r.TryDecodeAsNil() {
+ x.VolumePath = ""
+ } else {
+ x.VolumePath = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumePath = ""
+ } else {
+ x.VolumePath = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Items) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv4), d)
+ }
+ }
+ case "Name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv8 := &x.Items
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv8), d)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ yyq2[1] = x.HostPort != 0
+ yyq2[3] = x.Protocol != ""
+ yyq2[4] = x.HostIP != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HostPort))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HostPort))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ContainerPort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ContainerPort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Protocol.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Protocol.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "hostPort":
+ if r.TryDecodeAsNil() {
+ x.HostPort = 0
+ } else {
+ x.HostPort = int32(r.DecodeInt(32))
+ }
+ case "containerPort":
+ if r.TryDecodeAsNil() {
+ x.ContainerPort = 0
+ } else {
+ x.ContainerPort = int32(r.DecodeInt(32))
+ }
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ case "hostIP":
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPort = 0
+ } else {
+ x.HostPort = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerPort = 0
+ } else {
+ x.ContainerPort = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.ReadOnly != false
+ yyq2[3] = x.SubPath != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MountPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("mountPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MountPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SubPath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SubPath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ case "mountPath":
+ if r.TryDecodeAsNil() {
+ x.MountPath = ""
+ } else {
+ x.MountPath = string(r.DecodeString())
+ }
+ case "subPath":
+ if r.TryDecodeAsNil() {
+ x.SubPath = ""
+ } else {
+ x.SubPath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MountPath = ""
+ } else {
+ x.MountPath = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SubPath = ""
+ } else {
+ x.SubPath = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Value != ""
+ yyq2[2] = x.ValueFrom != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ValueFrom == nil {
+ r.EncodeNil()
+ } else {
+ x.ValueFrom.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("valueFrom"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ValueFrom == nil {
+ r.EncodeNil()
+ } else {
+ x.ValueFrom.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "valueFrom":
+ if r.TryDecodeAsNil() {
+ if x.ValueFrom != nil {
+ x.ValueFrom = nil
+ }
+ } else {
+ if x.ValueFrom == nil {
+ x.ValueFrom = new(EnvVarSource)
+ }
+ x.ValueFrom.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ValueFrom != nil {
+ x.ValueFrom = nil
+ }
+ } else {
+ if x.ValueFrom == nil {
+ x.ValueFrom = new(EnvVarSource)
+ }
+ x.ValueFrom.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.FieldRef != nil
+ yyq2[1] = x.ResourceFieldRef != nil
+ yyq2[2] = x.ConfigMapKeyRef != nil
+ yyq2[3] = x.SecretKeyRef != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ConfigMapKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMapKeyRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ConfigMapKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMapKeyRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.SecretKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretKeyRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretKeyRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "fieldRef":
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ case "resourceFieldRef":
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ case "configMapKeyRef":
+ if r.TryDecodeAsNil() {
+ if x.ConfigMapKeyRef != nil {
+ x.ConfigMapKeyRef = nil
+ }
+ } else {
+ if x.ConfigMapKeyRef == nil {
+ x.ConfigMapKeyRef = new(ConfigMapKeySelector)
+ }
+ x.ConfigMapKeyRef.CodecDecodeSelf(d)
+ }
+ case "secretKeyRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretKeyRef != nil {
+ x.SecretKeyRef = nil
+ }
+ } else {
+ if x.SecretKeyRef == nil {
+ x.SecretKeyRef = new(SecretKeySelector)
+ }
+ x.SecretKeyRef.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ConfigMapKeyRef != nil {
+ x.ConfigMapKeyRef = nil
+ }
+ } else {
+ if x.ConfigMapKeyRef == nil {
+ x.ConfigMapKeyRef = new(ConfigMapKeySelector)
+ }
+ x.ConfigMapKeyRef.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretKeyRef != nil {
+ x.SecretKeyRef = nil
+ }
+ } else {
+ if x.SecretKeyRef == nil {
+ x.SecretKeyRef = new(SecretKeySelector)
+ }
+ x.SecretKeyRef.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "fieldPath":
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ContainerName != ""
+ yyq2[2] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.Divisor
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("divisor"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Divisor
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "containerName":
+ if r.TryDecodeAsNil() {
+ x.ContainerName = ""
+ } else {
+ x.ContainerName = string(r.DecodeString())
+ }
+ case "resource":
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ case "divisor":
+ if r.TryDecodeAsNil() {
+ x.Divisor = pkg3_resource.Quantity{}
+ } else {
+ yyv6 := &x.Divisor
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerName = ""
+ } else {
+ x.ContainerName = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Divisor = pkg3_resource.Quantity{}
+ } else {
+ yyv11 := &x.Divisor
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "Name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "Name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ yyq2[1] = true
+ yyq2[2] = x.Host != ""
+ yyq2[3] = x.Scheme != ""
+ yyq2[4] = len(x.HTTPHeaders) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy7 := &x.Port
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Port
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("host"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Scheme.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scheme"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Scheme.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.HTTPHeaders == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpHeaders"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HTTPHeaders == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv5 := &x.Port
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ case "host":
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ case "scheme":
+ if r.TryDecodeAsNil() {
+ x.Scheme = ""
+ } else {
+ x.Scheme = URIScheme(r.DecodeString())
+ }
+ case "httpHeaders":
+ if r.TryDecodeAsNil() {
+ x.HTTPHeaders = nil
+ } else {
+ yyv9 := &x.HTTPHeaders
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceHTTPHeader((*[]HTTPHeader)(yyv9), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv13 := &x.Port
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv13) {
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv13)
+ } else {
+ z.DecFallback(yyv13, false)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Scheme = ""
+ } else {
+ x.Scheme = URIScheme(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HTTPHeaders = nil
+ } else {
+ yyv17 := &x.HTTPHeaders
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decSliceHTTPHeader((*[]HTTPHeader)(yyv17), d)
+ }
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.Port
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Port
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv4 := &x.Port
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv7 := &x.Port
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Command) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("command"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "command":
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv4 := &x.Command
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv7 := &x.Command
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv7, false, d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
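+ // Probe encodes its five int32 timing/threshold fields (omitted when zero in map form)
+ // together with the embedded Handler's exec, httpGet and tcpSocket actions, each written only when non-nil.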
+func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.InitialDelaySeconds != 0
+ yyq2[1] = x.TimeoutSeconds != 0
+ yyq2[2] = x.PeriodSeconds != 0
+ yyq2[3] = x.SuccessThreshold != 0
+ yyq2[4] = x.FailureThreshold != 0
+ yyq2[5] = x.Handler.Exec != nil && x.Exec != nil
+ yyq2[6] = x.Handler.HTTPGet != nil && x.HTTPGet != nil
+ yyq2[7] = x.Handler.TCPSocket != nil && x.TCPSocket != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.InitialDelaySeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.InitialDelaySeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TimeoutSeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TimeoutSeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.PeriodSeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("periodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.PeriodSeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SuccessThreshold))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("successThreshold"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SuccessThreshold))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FailureThreshold))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failureThreshold"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FailureThreshold))
+ }
+ }
+ }
+ var yyn18 bool
+ if x.Handler.Exec == nil {
+ yyn18 = true
+ goto LABEL18
+ }
+ LABEL18:
+ if yyr2 || yy2arr2 {
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn21 bool
+ if x.Handler.HTTPGet == nil {
+ yyn21 = true
+ goto LABEL21
+ }
+ LABEL21:
+ if yyr2 || yy2arr2 {
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpGet"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn24 bool
+ if x.Handler.TCPSocket == nil {
+ yyn24 = true
+ goto LABEL24
+ }
+ LABEL24:
+ if yyr2 || yy2arr2 {
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tcpSocket"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "initialDelaySeconds":
+ if r.TryDecodeAsNil() {
+ x.InitialDelaySeconds = 0
+ } else {
+ x.InitialDelaySeconds = int32(r.DecodeInt(32))
+ }
+ case "timeoutSeconds":
+ if r.TryDecodeAsNil() {
+ x.TimeoutSeconds = 0
+ } else {
+ x.TimeoutSeconds = int32(r.DecodeInt(32))
+ }
+ case "periodSeconds":
+ if r.TryDecodeAsNil() {
+ x.PeriodSeconds = 0
+ } else {
+ x.PeriodSeconds = int32(r.DecodeInt(32))
+ }
+ case "successThreshold":
+ if r.TryDecodeAsNil() {
+ x.SuccessThreshold = 0
+ } else {
+ x.SuccessThreshold = int32(r.DecodeInt(32))
+ }
+ case "failureThreshold":
+ if r.TryDecodeAsNil() {
+ x.FailureThreshold = 0
+ } else {
+ x.FailureThreshold = int32(r.DecodeInt(32))
+ }
+ case "exec":
+ if x.Handler.Exec == nil {
+ x.Handler.Exec = new(ExecAction)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ case "httpGet":
+ if x.Handler.HTTPGet == nil {
+ x.Handler.HTTPGet = new(HTTPGetAction)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ case "tcpSocket":
+ if x.Handler.TCPSocket == nil {
+ x.Handler.TCPSocket = new(TCPSocketAction)
+ }
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.InitialDelaySeconds = 0
+ } else {
+ x.InitialDelaySeconds = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TimeoutSeconds = 0
+ } else {
+ x.TimeoutSeconds = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PeriodSeconds = 0
+ } else {
+ x.PeriodSeconds = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SuccessThreshold = 0
+ } else {
+ x.SuccessThreshold = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FailureThreshold = 0
+ } else {
+ x.FailureThreshold = int32(r.DecodeInt(32))
+ }
+ if x.Handler.Exec == nil {
+ x.Handler.Exec = new(ExecAction)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ if x.Handler.HTTPGet == nil {
+ x.Handler.HTTPGet = new(HTTPGetAction)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ if x.Handler.TCPSocket == nil {
+ x.Handler.TCPSocket = new(TCPSocketAction)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
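+ // PullPolicy and Capability are string-backed types: they encode as plain UTF-8 strings
+ // and decode by casting the decoded string back to the named type.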
+func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
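+ // Capabilities holds the Add and Drop capability lists ("add"/"drop"), each omitted when empty
+ // and handled through the encSliceCapability/decSliceCapability helpers.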
+func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Add) != 0
+ yyq2[1] = len(x.Drop) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Add == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Add), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("add"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Add == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Add), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Drop == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Drop), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("drop"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Drop == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Drop), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "add":
+ if r.TryDecodeAsNil() {
+ x.Add = nil
+ } else {
+ yyv4 := &x.Add
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv4), d)
+ }
+ }
+ case "drop":
+ if r.TryDecodeAsNil() {
+ x.Drop = nil
+ } else {
+ yyv6 := &x.Drop
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Add = nil
+ } else {
+ yyv9 := &x.Add
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Drop = nil
+ } else {
+ yyv11 := &x.Drop
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
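+ // ResourceRequirements writes its Limits and Requests fields under "limits"/"requests" only when
+ // non-empty; both delegate to the field type's own CodecEncodeSelf/CodecDecodeSelf.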
+func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Limits) != 0
+ yyq2[1] = len(x.Requests) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ x.Limits.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("limits"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ x.Limits.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Requests == nil {
+ r.EncodeNil()
+ } else {
+ x.Requests.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requests"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Requests == nil {
+ r.EncodeNil()
+ } else {
+ x.Requests.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "limits":
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv4 := &x.Limits
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "requests":
+ if r.TryDecodeAsNil() {
+ x.Requests = nil
+ } else {
+ yyv5 := &x.Requests
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv7 := &x.Limits
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Requests = nil
+ } else {
+ yyv8 := &x.Requests
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
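+ // Container is encoded either as a fixed 18-element array or as a map; name, image and
+ // imagePullPolicy are always written in map form, while most other fields are omitted when unset.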
+func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [18]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = len(x.Command) != 0
+ yyq2[3] = len(x.Args) != 0
+ yyq2[4] = x.WorkingDir != ""
+ yyq2[5] = len(x.Ports) != 0
+ yyq2[6] = len(x.Env) != 0
+ yyq2[7] = true
+ yyq2[8] = len(x.VolumeMounts) != 0
+ yyq2[9] = x.LivenessProbe != nil
+ yyq2[10] = x.ReadinessProbe != nil
+ yyq2[11] = x.Lifecycle != nil
+ yyq2[12] = x.TerminationMessagePath != ""
+ yyq2[14] = x.SecurityContext != nil
+ yyq2[15] = x.Stdin != false
+ yyq2[16] = x.StdinOnce != false
+ yyq2[17] = x.TTY != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(18)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("command"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Args == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Args, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("args"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Args == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Args, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("workingDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ h.encSliceContainerPort(([]ContainerPort)(x.Ports), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ h.encSliceContainerPort(([]ContainerPort)(x.Ports), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.Env == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ h.encSliceEnvVar(([]EnvVar)(x.Env), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("env"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Env == nil {
+ r.EncodeNil()
+ } else {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ h.encSliceEnvVar(([]EnvVar)(x.Env), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yy25 := &x.Resources
+ yy25.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy27 := &x.Resources
+ yy27.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.VolumeMounts == nil {
+ r.EncodeNil()
+ } else {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeMounts"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumeMounts == nil {
+ r.EncodeNil()
+ } else {
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.LivenessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.LivenessProbe.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("livenessProbe"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LivenessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.LivenessProbe.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.ReadinessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.ReadinessProbe.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readinessProbe"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ReadinessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.ReadinessProbe.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.Lifecycle == nil {
+ r.EncodeNil()
+ } else {
+ x.Lifecycle.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lifecycle"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Lifecycle == nil {
+ r.EncodeNil()
+ } else {
+ x.Lifecycle.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.ImagePullPolicy.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.ImagePullPolicy.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("securityContext"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ yym51 := z.EncBinary()
+ _ = yym51
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdin"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ yym54 := z.EncBinary()
+ _ = yym54
+ if false {
+ } else {
+ r.EncodeBool(bool(x.StdinOnce))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdinOnce"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym55 := z.EncBinary()
+ _ = yym55
+ if false {
+ } else {
+ r.EncodeBool(bool(x.StdinOnce))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ yym57 := z.EncBinary()
+ _ = yym57
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tty"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym58 := z.EncBinary()
+ _ = yym58
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "image":
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ case "command":
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv6 := &x.Command
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ case "args":
+ if r.TryDecodeAsNil() {
+ x.Args = nil
+ } else {
+ yyv8 := &x.Args
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ case "workingDir":
+ if r.TryDecodeAsNil() {
+ x.WorkingDir = ""
+ } else {
+ x.WorkingDir = string(r.DecodeString())
+ }
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv11 := &x.Ports
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceContainerPort((*[]ContainerPort)(yyv11), d)
+ }
+ }
+ case "env":
+ if r.TryDecodeAsNil() {
+ x.Env = nil
+ } else {
+ yyv13 := &x.Env
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEnvVar((*[]EnvVar)(yyv13), d)
+ }
+ }
+ case "resources":
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv15 := &x.Resources
+ yyv15.CodecDecodeSelf(d)
+ }
+ case "volumeMounts":
+ if r.TryDecodeAsNil() {
+ x.VolumeMounts = nil
+ } else {
+ yyv16 := &x.VolumeMounts
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.decSliceVolumeMount((*[]VolumeMount)(yyv16), d)
+ }
+ }
+ case "livenessProbe":
+ if r.TryDecodeAsNil() {
+ if x.LivenessProbe != nil {
+ x.LivenessProbe = nil
+ }
+ } else {
+ if x.LivenessProbe == nil {
+ x.LivenessProbe = new(Probe)
+ }
+ x.LivenessProbe.CodecDecodeSelf(d)
+ }
+ case "readinessProbe":
+ if r.TryDecodeAsNil() {
+ if x.ReadinessProbe != nil {
+ x.ReadinessProbe = nil
+ }
+ } else {
+ if x.ReadinessProbe == nil {
+ x.ReadinessProbe = new(Probe)
+ }
+ x.ReadinessProbe.CodecDecodeSelf(d)
+ }
+ case "lifecycle":
+ if r.TryDecodeAsNil() {
+ if x.Lifecycle != nil {
+ x.Lifecycle = nil
+ }
+ } else {
+ if x.Lifecycle == nil {
+ x.Lifecycle = new(Lifecycle)
+ }
+ x.Lifecycle.CodecDecodeSelf(d)
+ }
+ case "terminationMessagePath":
+ if r.TryDecodeAsNil() {
+ x.TerminationMessagePath = ""
+ } else {
+ x.TerminationMessagePath = string(r.DecodeString())
+ }
+ case "imagePullPolicy":
+ if r.TryDecodeAsNil() {
+ x.ImagePullPolicy = ""
+ } else {
+ x.ImagePullPolicy = PullPolicy(r.DecodeString())
+ }
+ case "securityContext":
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(SecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ case "stdin":
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ case "stdinOnce":
+ if r.TryDecodeAsNil() {
+ x.StdinOnce = false
+ } else {
+ x.StdinOnce = bool(r.DecodeBool())
+ }
+ case "tty":
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj27 int
+ var yyb27 bool
+ var yyhl27 bool = l >= 0
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv30 := &x.Command
+ yym31 := z.DecBinary()
+ _ = yym31
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv30, false, d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Args = nil
+ } else {
+ yyv32 := &x.Args
+ yym33 := z.DecBinary()
+ _ = yym33
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv32, false, d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.WorkingDir = ""
+ } else {
+ x.WorkingDir = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv35 := &x.Ports
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else {
+ h.decSliceContainerPort((*[]ContainerPort)(yyv35), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Env = nil
+ } else {
+ yyv37 := &x.Env
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else {
+ h.decSliceEnvVar((*[]EnvVar)(yyv37), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv39 := &x.Resources
+ yyv39.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeMounts = nil
+ } else {
+ yyv40 := &x.VolumeMounts
+ yym41 := z.DecBinary()
+ _ = yym41
+ if false {
+ } else {
+ h.decSliceVolumeMount((*[]VolumeMount)(yyv40), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LivenessProbe != nil {
+ x.LivenessProbe = nil
+ }
+ } else {
+ if x.LivenessProbe == nil {
+ x.LivenessProbe = new(Probe)
+ }
+ x.LivenessProbe.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ReadinessProbe != nil {
+ x.ReadinessProbe = nil
+ }
+ } else {
+ if x.ReadinessProbe == nil {
+ x.ReadinessProbe = new(Probe)
+ }
+ x.ReadinessProbe.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Lifecycle != nil {
+ x.Lifecycle = nil
+ }
+ } else {
+ if x.Lifecycle == nil {
+ x.Lifecycle = new(Lifecycle)
+ }
+ x.Lifecycle.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TerminationMessagePath = ""
+ } else {
+ x.TerminationMessagePath = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImagePullPolicy = ""
+ } else {
+ x.ImagePullPolicy = PullPolicy(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(SecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StdinOnce = false
+ } else {
+ x.StdinOnce = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ for {
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj27-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
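+ // Handler wraps the three probe/lifecycle action variants; exec, httpGet and tcpSocket are
+ // each written only when the corresponding pointer is non-nil.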
+func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Exec != nil
+ yyq2[1] = x.HTTPGet != nil
+ yyq2[2] = x.TCPSocket != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpGet"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tcpSocket"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "exec":
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ case "httpGet":
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ case "tcpSocket":
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.PostStart != nil
+ yyq2[1] = x.PreStop != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.PostStart == nil {
+ r.EncodeNil()
+ } else {
+ x.PostStart.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("postStart"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PostStart == nil {
+ r.EncodeNil()
+ } else {
+ x.PostStart.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreStop == nil {
+ r.EncodeNil()
+ } else {
+ x.PreStop.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preStop"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreStop == nil {
+ r.EncodeNil()
+ } else {
+ x.PreStop.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "postStart":
+ if r.TryDecodeAsNil() {
+ if x.PostStart != nil {
+ x.PostStart = nil
+ }
+ } else {
+ if x.PostStart == nil {
+ x.PostStart = new(Handler)
+ }
+ x.PostStart.CodecDecodeSelf(d)
+ }
+ case "preStop":
+ if r.TryDecodeAsNil() {
+ if x.PreStop != nil {
+ x.PreStop = nil
+ }
+ } else {
+ if x.PreStop == nil {
+ x.PreStop = new(Handler)
+ }
+ x.PreStop.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PostStart != nil {
+ x.PostStart = nil
+ }
+ } else {
+ if x.PostStart == nil {
+ x.PostStart = new(Handler)
+ }
+ x.PostStart.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PreStop != nil {
+ x.PreStop = nil
+ }
+ } else {
+ if x.PreStop == nil {
+ x.PreStop = new(Handler)
+ }
+ x.PreStop.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Reason != ""
+ yyq2[1] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.StartedAt
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if yym5 {
+ z.EncBinaryMarshal(yy4)
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startedAt"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.StartedAt
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if yym7 {
+ z.EncBinaryMarshal(yy6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "startedAt":
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv4 := &x.StartedAt
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if yym5 {
+ z.DecBinaryUnmarshal(yyv4)
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv7 := &x.StartedAt
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if yym8 {
+ z.DecBinaryUnmarshal(yyv7)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Signal != 0
+ yyq2[2] = x.Reason != ""
+ yyq2[3] = x.Message != ""
+ yyq2[4] = true
+ yyq2[5] = true
+ yyq2[6] = x.ContainerID != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExitCode))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exitCode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExitCode))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Signal))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("signal"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Signal))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yy16 := &x.StartedAt
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy16) {
+ } else if yym17 {
+ z.EncBinaryMarshal(yy16)
+ } else if !yym17 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy16)
+ } else {
+ z.EncFallback(yy16)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startedAt"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy18 := &x.StartedAt
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy18) {
+ } else if yym19 {
+ z.EncBinaryMarshal(yy18)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy18)
+ } else {
+ z.EncFallback(yy18)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yy21 := &x.FinishedAt
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy21) {
+ } else if yym22 {
+ z.EncBinaryMarshal(yy21)
+ } else if !yym22 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy21)
+ } else {
+ z.EncFallback(yy21)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("finishedAt"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy23 := &x.FinishedAt
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy23) {
+ } else if yym24 {
+ z.EncBinaryMarshal(yy23)
+ } else if !yym24 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy23)
+ } else {
+ z.EncFallback(yy23)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "exitCode":
+ if r.TryDecodeAsNil() {
+ x.ExitCode = 0
+ } else {
+ x.ExitCode = int32(r.DecodeInt(32))
+ }
+ case "signal":
+ if r.TryDecodeAsNil() {
+ x.Signal = 0
+ } else {
+ x.Signal = int32(r.DecodeInt(32))
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "startedAt":
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv8 := &x.StartedAt
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "finishedAt":
+ if r.TryDecodeAsNil() {
+ x.FinishedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv10 := &x.FinishedAt
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv10) {
+ } else if yym11 {
+ z.DecBinaryUnmarshal(yyv10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv10)
+ } else {
+ z.DecFallback(yyv10, false)
+ }
+ }
+ case "containerID":
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExitCode = 0
+ } else {
+ x.ExitCode = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Signal = 0
+ } else {
+ x.Signal = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv18 := &x.StartedAt
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv18) {
+ } else if yym19 {
+ z.DecBinaryUnmarshal(yyv18)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv18)
+ } else {
+ z.DecFallback(yyv18, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FinishedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv20 := &x.FinishedAt
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv20) {
+ } else if yym21 {
+ z.DecBinaryUnmarshal(yyv20)
+ } else if !yym21 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv20)
+ } else {
+ z.DecFallback(yyv20, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Waiting != nil
+ yyq2[1] = x.Running != nil
+ yyq2[2] = x.Terminated != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Waiting == nil {
+ r.EncodeNil()
+ } else {
+ x.Waiting.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("waiting"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Waiting == nil {
+ r.EncodeNil()
+ } else {
+ x.Waiting.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Running == nil {
+ r.EncodeNil()
+ } else {
+ x.Running.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("running"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Running == nil {
+ r.EncodeNil()
+ } else {
+ x.Running.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Terminated == nil {
+ r.EncodeNil()
+ } else {
+ x.Terminated.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminated"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Terminated == nil {
+ r.EncodeNil()
+ } else {
+ x.Terminated.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "waiting":
+ if r.TryDecodeAsNil() {
+ if x.Waiting != nil {
+ x.Waiting = nil
+ }
+ } else {
+ if x.Waiting == nil {
+ x.Waiting = new(ContainerStateWaiting)
+ }
+ x.Waiting.CodecDecodeSelf(d)
+ }
+ case "running":
+ if r.TryDecodeAsNil() {
+ if x.Running != nil {
+ x.Running = nil
+ }
+ } else {
+ if x.Running == nil {
+ x.Running = new(ContainerStateRunning)
+ }
+ x.Running.CodecDecodeSelf(d)
+ }
+ case "terminated":
+ if r.TryDecodeAsNil() {
+ if x.Terminated != nil {
+ x.Terminated = nil
+ }
+ } else {
+ if x.Terminated == nil {
+ x.Terminated = new(ContainerStateTerminated)
+ }
+ x.Terminated.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Waiting != nil {
+ x.Waiting = nil
+ }
+ } else {
+ if x.Waiting == nil {
+ x.Waiting = new(ContainerStateWaiting)
+ }
+ x.Waiting.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Running != nil {
+ x.Running = nil
+ }
+ } else {
+ if x.Running == nil {
+ x.Running = new(ContainerStateRunning)
+ }
+ x.Running.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Terminated != nil {
+ x.Terminated = nil
+ }
+ } else {
+ if x.Terminated == nil {
+ x.Terminated = new(ContainerStateTerminated)
+ }
+ x.Terminated.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[7] = x.ContainerID != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 5
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy7 := &x.State
+ yy7.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("state"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.State
+ yy9.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy12 := &x.LastTerminationState
+ yy12.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastState"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.LastTerminationState
+ yy14.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Ready))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ready"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Ready))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RestartCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("restartCount"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RestartCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ImageID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imageID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ImageID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "state":
+ if r.TryDecodeAsNil() {
+ x.State = ContainerState{}
+ } else {
+ yyv5 := &x.State
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "lastState":
+ if r.TryDecodeAsNil() {
+ x.LastTerminationState = ContainerState{}
+ } else {
+ yyv6 := &x.LastTerminationState
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "ready":
+ if r.TryDecodeAsNil() {
+ x.Ready = false
+ } else {
+ x.Ready = bool(r.DecodeBool())
+ }
+ case "restartCount":
+ if r.TryDecodeAsNil() {
+ x.RestartCount = 0
+ } else {
+ x.RestartCount = int32(r.DecodeInt(32))
+ }
+ case "image":
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ case "imageID":
+ if r.TryDecodeAsNil() {
+ x.ImageID = ""
+ } else {
+ x.ImageID = string(r.DecodeString())
+ }
+ case "containerID":
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.State = ContainerState{}
+ } else {
+ yyv14 := &x.State
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTerminationState = ContainerState{}
+ } else {
+ yyv15 := &x.LastTerminationState
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ready = false
+ } else {
+ x.Ready = bool(r.DecodeBool())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RestartCount = 0
+ } else {
+ x.RestartCount = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImageID = ""
+ } else {
+ x.ImageID = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Status.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Status.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastProbeTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastProbeTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = PodConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ case "lastProbeTime":
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg2_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastProbeTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = PodConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg2_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastProbeTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
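+// RestartPolicy is a string-backed type: it encodes as a plain UTF-8 string and
+// decodes by writing the decoded string straight into the receiver, unless a
+// registered codec extension takes over.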
+func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
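+// CodecEncodeSelf encodes a PodList either as a map keyed by its JSON field names
+// ("metadata", "items", "kind", "apiVersion") or as a fixed-length array when the
+// handle's StructToArray option is set; optional fields left at their zero value
+// are omitted from map output.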
+func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePod(([]Pod)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePod(([]Pod)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
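+// CodecDecodeSelf inspects the incoming container type and dispatches to the
+// map-based or array-based PodList decoder; any other container type panics.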
+func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePod((*[]Pod)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
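+// codecDecodeSelfFromArray decodes PodList fields positionally
+// (ListMeta, Items, Kind, APIVersion) from an array container.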
+func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePod((*[]Pod)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
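+// CodecEncodeSelf encodes a NodeSelector, writing its single "nodeSelectorTerms"
+// field through the generated encSliceNodeSelectorTerm helper.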
+func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.NodeSelectorTerms == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeSelectorTerms == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "nodeSelectorTerms":
+ if r.TryDecodeAsNil() {
+ x.NodeSelectorTerms = nil
+ } else {
+ yyv4 := &x.NodeSelectorTerms
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeSelectorTerms = nil
+ } else {
+ yyv7 := &x.NodeSelectorTerms
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchExpressions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "matchExpressions":
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv4 := &x.MatchExpressions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv7 := &x.MatchExpressions
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
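+// CodecEncodeSelf encodes a NodeSelectorRequirement; "key" and "operator" are
+// always written, while "values" is dropped from map output when the slice is empty.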
+func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = len(x.Values) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("values"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = NodeSelectorOperator(r.DecodeString())
+ }
+ case "values":
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv6 := &x.Values
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = NodeSelectorOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv11 := &x.Values
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
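+// NodeSelectorOperator round-trips as a plain string, mirroring the other
+// string-alias types in this file.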
+func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
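+// CodecEncodeSelf encodes an Affinity; the nodeAffinity, podAffinity and
+// podAntiAffinity pointers are emitted only when non-nil (map form) or as nil
+// placeholders (array form).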
+func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.NodeAffinity != nil
+ yyq2[1] = x.PodAffinity != nil
+ yyq2[2] = x.PodAntiAffinity != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.NodeAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.NodeAffinity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.NodeAffinity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PodAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAffinity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PodAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAffinity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.PodAntiAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAntiAffinity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PodAntiAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAntiAffinity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "nodeAffinity":
+ if r.TryDecodeAsNil() {
+ if x.NodeAffinity != nil {
+ x.NodeAffinity = nil
+ }
+ } else {
+ if x.NodeAffinity == nil {
+ x.NodeAffinity = new(NodeAffinity)
+ }
+ x.NodeAffinity.CodecDecodeSelf(d)
+ }
+ case "podAffinity":
+ if r.TryDecodeAsNil() {
+ if x.PodAffinity != nil {
+ x.PodAffinity = nil
+ }
+ } else {
+ if x.PodAffinity == nil {
+ x.PodAffinity = new(PodAffinity)
+ }
+ x.PodAffinity.CodecDecodeSelf(d)
+ }
+ case "podAntiAffinity":
+ if r.TryDecodeAsNil() {
+ if x.PodAntiAffinity != nil {
+ x.PodAntiAffinity = nil
+ }
+ } else {
+ if x.PodAntiAffinity == nil {
+ x.PodAntiAffinity = new(PodAntiAffinity)
+ }
+ x.PodAntiAffinity.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NodeAffinity != nil {
+ x.NodeAffinity = nil
+ }
+ } else {
+ if x.NodeAffinity == nil {
+ x.NodeAffinity = new(NodeAffinity)
+ }
+ x.NodeAffinity.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PodAffinity != nil {
+ x.PodAffinity = nil
+ }
+ } else {
+ if x.PodAffinity == nil {
+ x.PodAffinity = new(PodAffinity)
+ }
+ x.PodAffinity.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PodAntiAffinity != nil {
+ x.PodAntiAffinity = nil
+ }
+ } else {
+ if x.PodAntiAffinity == nil {
+ x.PodAntiAffinity = new(PodAntiAffinity)
+ }
+ x.PodAntiAffinity.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
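+// CodecEncodeSelf encodes a PodAffinity; both the required and preferred
+// scheduling-term slices are optional and are skipped in map output when empty.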
+func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0
+ yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "requiredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d)
+ }
+ }
+ case "preferredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0
+ yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "requiredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d)
+ }
+ }
+ case "preferredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
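+// CodecEncodeSelf encodes a WeightedPodAffinityTerm as its two required fields,
+// "weight" and "podAffinityTerm"; the embedded PodAffinityTerm encodes itself.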
+func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("weight"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.PodAffinityTerm
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.PodAffinityTerm
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "weight":
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ case "podAffinityTerm":
+ if r.TryDecodeAsNil() {
+ x.PodAffinityTerm = PodAffinityTerm{}
+ } else {
+ yyv5 := &x.PodAffinityTerm
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodAffinityTerm = PodAffinityTerm{}
+ } else {
+ yyv8 := &x.PodAffinityTerm
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
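+// CodecEncodeSelf encodes a PodAffinityTerm; "labelSelector" goes through
+// z.EncFallback (reflection) unless a codec extension is registered, "namespaces"
+// is always written, and "topologyKey" is omitted from map output when empty.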
+func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.LabelSelector != nil
+ yyq2[2] = x.TopologyKey != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.LabelSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LabelSelector) {
+ } else {
+ z.EncFallback(x.LabelSelector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("labelSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LabelSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LabelSelector) {
+ } else {
+ z.EncFallback(x.LabelSelector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Namespaces == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Namespaces, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespaces"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Namespaces == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Namespaces, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("topologyKey"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "labelSelector":
+ if r.TryDecodeAsNil() {
+ if x.LabelSelector != nil {
+ x.LabelSelector = nil
+ }
+ } else {
+ if x.LabelSelector == nil {
+ x.LabelSelector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LabelSelector) {
+ } else {
+ z.DecFallback(x.LabelSelector, false)
+ }
+ }
+ case "namespaces":
+ if r.TryDecodeAsNil() {
+ x.Namespaces = nil
+ } else {
+ yyv6 := &x.Namespaces
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ case "topologyKey":
+ if r.TryDecodeAsNil() {
+ x.TopologyKey = ""
+ } else {
+ x.TopologyKey = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LabelSelector != nil {
+ x.LabelSelector = nil
+ }
+ } else {
+ if x.LabelSelector == nil {
+ x.LabelSelector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LabelSelector) {
+ } else {
+ z.DecFallback(x.LabelSelector, false)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespaces = nil
+ } else {
+ yyv12 := &x.Namespaces
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv12, false, d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TopologyKey = ""
+ } else {
+ x.TopologyKey = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
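+// CodecEncodeSelf encodes a NodeAffinity, omitting the required-node selector when
+// nil and the preferred scheduling terms when empty from map output.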
+func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil
+ yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "requiredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ } else {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector)
+ }
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d)
+ }
+ case "preferredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ } else {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector)
+ }
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
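+// CodecEncodeSelf encodes PreferredSchedulingTerm as a two-element array or as a
+// map with the "weight" and "preference" keys, depending on the handle's
+// StructToArray setting; both fields are always emitted.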
+func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("weight"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Preference
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preference"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Preference
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "weight":
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int32(r.DecodeInt(32))
+ }
+ case "preference":
+ if r.TryDecodeAsNil() {
+ x.Preference = NodeSelectorTerm{}
+ } else {
+ yyv5 := &x.Preference
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int32(r.DecodeInt(32))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Preference = NodeSelectorTerm{}
+ } else {
+ yyv8 := &x.Preference
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
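+// CodecEncodeSelf encodes Taint; "key" and "effect" are always emitted, while
+// "value" is omitted from map output when it is empty.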
+func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Value != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Effect.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("effect"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Effect.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "effect":
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
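+// CodecEncodeSelf encodes Toleration, omitting "key", "operator", "value", and
+// "effect" from map output when they are empty.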
+func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Key != ""
+ yyq2[1] = x.Operator != ""
+ yyq2[2] = x.Value != ""
+ yyq2[3] = x.Effect != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Effect.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("effect"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Effect.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = TolerationOperator(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "effect":
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = TolerationOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
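+// CodecEncodeSelf encodes PodSpec; "volumes", "containers", and
+// "serviceAccountName" are always emitted, while the remaining fields are
+// omitted from map output when they are unset, nil, or empty.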
+func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [13]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.RestartPolicy != ""
+ yyq2[3] = x.TerminationGracePeriodSeconds != nil
+ yyq2[4] = x.ActiveDeadlineSeconds != nil
+ yyq2[5] = x.DNSPolicy != ""
+ yyq2[6] = len(x.NodeSelector) != 0
+ yyq2[8] = x.NodeName != ""
+ yyq2[9] = x.SecurityContext != nil
+ yyq2[10] = len(x.ImagePullSecrets) != 0
+ yyq2[11] = x.Hostname != ""
+ yyq2[12] = x.Subdomain != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(13)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceVolume(([]Volume)(x.Volumes), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceVolume(([]Volume)(x.Volumes), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Containers == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceContainer(([]Container)(x.Containers), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Containers == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceContainer(([]Container)(x.Containers), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.RestartPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("restartPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.RestartPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.TerminationGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy13 := *x.TerminationGracePeriodSeconds
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(yy13))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TerminationGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.TerminationGracePeriodSeconds
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(yy15))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.ActiveDeadlineSeconds
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.ActiveDeadlineSeconds
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(yy20))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ x.DNSPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.DNSPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.NodeSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.NodeSelector, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.NodeSelector, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NodeName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NodeName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("securityContext"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostname"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subdomain"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumes":
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv4 := &x.Volumes
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceVolume((*[]Volume)(yyv4), d)
+ }
+ }
+ case "containers":
+ if r.TryDecodeAsNil() {
+ x.Containers = nil
+ } else {
+ yyv6 := &x.Containers
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceContainer((*[]Container)(yyv6), d)
+ }
+ }
+ case "restartPolicy":
+ if r.TryDecodeAsNil() {
+ x.RestartPolicy = ""
+ } else {
+ x.RestartPolicy = RestartPolicy(r.DecodeString())
+ }
+ case "terminationGracePeriodSeconds":
+ if r.TryDecodeAsNil() {
+ if x.TerminationGracePeriodSeconds != nil {
+ x.TerminationGracePeriodSeconds = nil
+ }
+ } else {
+ if x.TerminationGracePeriodSeconds == nil {
+ x.TerminationGracePeriodSeconds = new(int64)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "activeDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "dnsPolicy":
+ if r.TryDecodeAsNil() {
+ x.DNSPolicy = ""
+ } else {
+ x.DNSPolicy = DNSPolicy(r.DecodeString())
+ }
+ case "nodeSelector":
+ if r.TryDecodeAsNil() {
+ x.NodeSelector = nil
+ } else {
+ yyv14 := &x.NodeSelector
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv14, false, d)
+ }
+ }
+ case "serviceAccountName":
+ if r.TryDecodeAsNil() {
+ x.ServiceAccountName = ""
+ } else {
+ x.ServiceAccountName = string(r.DecodeString())
+ }
+ case "nodeName":
+ if r.TryDecodeAsNil() {
+ x.NodeName = ""
+ } else {
+ x.NodeName = string(r.DecodeString())
+ }
+ case "securityContext":
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(PodSecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ case "imagePullSecrets":
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv19 := &x.ImagePullSecrets
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv19), d)
+ }
+ }
+ case "hostname":
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ case "subdomain":
+ if r.TryDecodeAsNil() {
+ x.Subdomain = ""
+ } else {
+ x.Subdomain = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj23 int
+ var yyb23 bool
+ var yyhl23 bool = l >= 0
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv24 := &x.Volumes
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else {
+ h.decSliceVolume((*[]Volume)(yyv24), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Containers = nil
+ } else {
+ yyv26 := &x.Containers
+ yym27 := z.DecBinary()
+ _ = yym27
+ if false {
+ } else {
+ h.decSliceContainer((*[]Container)(yyv26), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RestartPolicy = ""
+ } else {
+ x.RestartPolicy = RestartPolicy(r.DecodeString())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TerminationGracePeriodSeconds != nil {
+ x.TerminationGracePeriodSeconds = nil
+ }
+ } else {
+ if x.TerminationGracePeriodSeconds == nil {
+ x.TerminationGracePeriodSeconds = new(int64)
+ }
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DNSPolicy = ""
+ } else {
+ x.DNSPolicy = DNSPolicy(r.DecodeString())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeSelector = nil
+ } else {
+ yyv34 := &x.NodeSelector
+ yym35 := z.DecBinary()
+ _ = yym35
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv34, false, d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceAccountName = ""
+ } else {
+ x.ServiceAccountName = string(r.DecodeString())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeName = ""
+ } else {
+ x.NodeName = string(r.DecodeString())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(PodSecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv39 := &x.ImagePullSecrets
+ yym40 := z.DecBinary()
+ _ = yym40
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv39), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subdomain = ""
+ } else {
+ x.Subdomain = string(r.DecodeString())
+ }
+ for {
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj23-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
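+// CodecEncodeSelf encodes PodSecurityContext, omitting each field from map
+// output when it is unset (false, nil, or empty).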
+func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.HostNetwork != false
+ yyq2[1] = x.HostPID != false
+ yyq2[2] = x.HostIPC != false
+ yyq2[3] = x.SELinuxOptions != nil
+ yyq2[4] = x.RunAsUser != nil
+ yyq2[5] = x.RunAsNonRoot != nil
+ yyq2[6] = len(x.SupplementalGroups) != 0
+ yyq2[7] = x.FSGroup != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostNetwork"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIPC"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.RunAsUser
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsUser"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.RunAsUser
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy21 := *x.RunAsNonRoot
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeBool(bool(yy21))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy23 := *x.RunAsNonRoot
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeBool(bool(yy23))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.SupplementalGroups == nil {
+ r.EncodeNil()
+ } else {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.EncSliceInt64V(x.SupplementalGroups, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SupplementalGroups == nil {
+ r.EncodeNil()
+ } else {
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ z.F.EncSliceInt64V(x.SupplementalGroups, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.FSGroup == nil {
+ r.EncodeNil()
+ } else {
+ yy29 := *x.FSGroup
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeInt(int64(yy29))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsGroup"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FSGroup == nil {
+ r.EncodeNil()
+ } else {
+ yy31 := *x.FSGroup
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeInt(int64(yy31))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hostNetwork":
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ case "hostPID":
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ case "hostIPC":
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ case "seLinuxOptions":
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ case "runAsUser":
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "runAsNonRoot":
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ case "supplementalGroups":
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = nil
+ } else {
+ yyv12 := &x.SupplementalGroups
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceInt64X(yyv12, false, d)
+ }
+ }
+ case "fsGroup":
+ if r.TryDecodeAsNil() {
+ if x.FSGroup != nil {
+ x.FSGroup = nil
+ }
+ } else {
+ if x.FSGroup == nil {
+ x.FSGroup = new(int64)
+ }
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj16 int
+ var yyb16 bool
+ var yyhl16 bool = l >= 0
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = nil
+ } else {
+ yyv25 := &x.SupplementalGroups
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.DecSliceInt64X(yyv25, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FSGroup != nil {
+ x.FSGroup = nil
+ }
+ } else {
+ if x.FSGroup == nil {
+ x.FSGroup = new(int64)
+ }
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64))
+ }
+ }
+ for {
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj16-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
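+// CodecEncodeSelf encodes PodStatus, omitting each field from map output when
+// it is empty or nil.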
+func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ yyq2[1] = len(x.Conditions) != 0
+ yyq2[2] = x.Message != ""
+ yyq2[3] = x.Reason != ""
+ yyq2[4] = x.HostIP != ""
+ yyq2[5] = x.PodIP != ""
+ yyq2[6] = x.StartTime != nil
+ yyq2[7] = len(x.ContainerStatuses) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePodCondition(([]PodCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePodCondition(([]PodCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym22 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym22 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym23 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym23 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.ContainerStatuses == nil {
+ r.EncodeNil()
+ } else {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerStatuses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ContainerStatuses == nil {
+ r.EncodeNil()
+ } else {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PodPhase(r.DecodeString())
+ }
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv5 := &x.Conditions
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePodCondition((*[]PodCondition)(yyv5), d)
+ }
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "hostIP":
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ case "podIP":
+ if r.TryDecodeAsNil() {
+ x.PodIP = ""
+ } else {
+ x.PodIP = string(r.DecodeString())
+ }
+ case "startTime":
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg2_unversioned.Time)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ case "containerStatuses":
+ if r.TryDecodeAsNil() {
+ x.ContainerStatuses = nil
+ } else {
+ yyv13 := &x.ContainerStatuses
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceContainerStatus((*[]ContainerStatus)(yyv13), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj15 int
+ var yyb15 bool
+ var yyhl15 bool = l >= 0
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PodPhase(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv17 := &x.Conditions
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decSlicePodCondition((*[]PodCondition)(yyv17), d)
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodIP = ""
+ } else {
+ x.PodIP = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg2_unversioned.Time)
+ }
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym24 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym24 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerStatuses = nil
+ } else {
+ yyv25 := &x.ContainerStatuses
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSliceContainerStatus((*[]ContainerStatus)(yyv25), d)
+ }
+ }
+ for {
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj15-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv7 := &x.ObjectMeta
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv8 := &x.Spec
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Template
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Template
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = PodTemplateSpec{}
+ } else {
+ yyv5 := &x.Template
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = PodTemplateSpec{}
+ } else {
+ yyv10 := &x.Template
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePodTemplate(([]PodTemplate)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePodTemplate(([]PodTemplate)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePodTemplate((*[]PodTemplate)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePodTemplate((*[]PodTemplate)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Template != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Template == nil {
+ r.EncodeNil()
+ } else {
+ x.Template.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Template == nil {
+ r.EncodeNil()
+ } else {
+ x.Template.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv5 := &x.Selector
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv5, false, d)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ if x.Template != nil {
+ x.Template = nil
+ }
+ } else {
+ if x.Template == nil {
+ x.Template = new(PodTemplateSpec)
+ }
+ x.Template.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv10 := &x.Selector
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv10, false, d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Template != nil {
+ x.Template = nil
+ }
+ } else {
+ if x.Template == nil {
+ x.Template = new(PodTemplateSpec)
+ }
+ x.Template.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FullyLabeledReplicas != 0
+ yyq2[2] = x.ObservedGeneration != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "fullyLabeledReplicas":
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicationControllerSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicationControllerStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicationControllerSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicationControllerStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceReplicationController(([]ReplicationController)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceReplicationController(([]ReplicationController)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceReplicationController((*[]ReplicationController)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceReplicationController((*[]ReplicationController)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceService(([]Service)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceService(([]Service)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceService((*[]Service)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceService((*[]Service)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.LoadBalancer
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancer"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.LoadBalancer
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "loadBalancer":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = LoadBalancerStatus{}
+ } else {
+ yyv4 := &x.LoadBalancer
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = LoadBalancerStatus{}
+ } else {
+ yyv6 := &x.LoadBalancer
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Ingress) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ingress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ingress":
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv4 := &x.Ingress
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv7 := &x.Ingress
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.IP != ""
+ yyq2[1] = x.Hostname != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ip"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostname"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ip":
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ case "hostname":
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Type != ""
+ yyq2[3] = x.ClusterIP != ""
+ yyq2[4] = len(x.ExternalIPs) != 0
+ yyq2[5] = x.LoadBalancerIP != ""
+ yyq2[6] = x.SessionAffinity != ""
+ yyq2[7] = len(x.LoadBalancerSourceRanges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceServicePort(([]ServicePort)(x.Ports), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceServicePort(([]ServicePort)(x.Ports), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ExternalIPs == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.ExternalIPs, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("externalIPs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ExternalIPs == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.ExternalIPs, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ x.SessionAffinity.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.SessionAffinity.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.LoadBalancerSourceRanges == nil {
+ r.EncodeNil()
+ } else {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LoadBalancerSourceRanges == nil {
+ r.EncodeNil()
+ } else {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ServiceType(r.DecodeString())
+ }
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv5 := &x.Ports
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceServicePort((*[]ServicePort)(yyv5), d)
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv7 := &x.Selector
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv7, false, d)
+ }
+ }
+ case "clusterIP":
+ if r.TryDecodeAsNil() {
+ x.ClusterIP = ""
+ } else {
+ x.ClusterIP = string(r.DecodeString())
+ }
+ case "externalIPs":
+ if r.TryDecodeAsNil() {
+ x.ExternalIPs = nil
+ } else {
+ yyv10 := &x.ExternalIPs
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv10, false, d)
+ }
+ }
+ case "loadBalancerIP":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerIP = ""
+ } else {
+ x.LoadBalancerIP = string(r.DecodeString())
+ }
+ case "sessionAffinity":
+ if r.TryDecodeAsNil() {
+ x.SessionAffinity = ""
+ } else {
+ x.SessionAffinity = ServiceAffinity(r.DecodeString())
+ }
+ case "loadBalancerSourceRanges":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerSourceRanges = nil
+ } else {
+ yyv14 := &x.LoadBalancerSourceRanges
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv14, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj16 int
+ var yyb16 bool
+ var yyhl16 bool = l >= 0
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ServiceType(r.DecodeString())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv18 := &x.Ports
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else {
+ h.decSliceServicePort((*[]ServicePort)(yyv18), d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv20 := &x.Selector
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv20, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterIP = ""
+ } else {
+ x.ClusterIP = string(r.DecodeString())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExternalIPs = nil
+ } else {
+ yyv23 := &x.ExternalIPs
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv23, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerIP = ""
+ } else {
+ x.LoadBalancerIP = string(r.DecodeString())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SessionAffinity = ""
+ } else {
+ x.SessionAffinity = ServiceAffinity(r.DecodeString())
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerSourceRanges = nil
+ } else {
+ yyv27 := &x.LoadBalancerSourceRanges
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv27, false, d)
+ }
+ }
+ for {
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj16-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 5
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Protocol.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Protocol.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy13 := &x.TargetPort
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy13) {
+ } else if !yym14 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy13)
+ } else {
+ z.EncFallback(yy13)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy15 := &x.TargetPort
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NodePort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodePort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NodePort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ case "targetPort":
+ if r.TryDecodeAsNil() {
+ x.TargetPort = pkg4_intstr.IntOrString{}
+ } else {
+ yyv7 := &x.TargetPort
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ case "nodePort":
+ if r.TryDecodeAsNil() {
+ x.NodePort = 0
+ } else {
+ x.NodePort = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetPort = pkg4_intstr.IntOrString{}
+ } else {
+ yyv14 := &x.TargetPort
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv14) {
+ } else if !yym15 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv14)
+ } else {
+ z.DecFallback(yyv14, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodePort = 0
+ } else {
+ x.NodePort = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ServiceSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ServiceStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ServiceSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ServiceStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = len(x.ImagePullSecrets) != 0
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Secrets == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secrets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Secrets == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "secrets":
+ if r.TryDecodeAsNil() {
+ x.Secrets = nil
+ } else {
+ yyv5 := &x.Secrets
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceObjectReference((*[]ObjectReference)(yyv5), d)
+ }
+ }
+ case "imagePullSecrets":
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv7 := &x.ImagePullSecrets
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv7), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv12 := &x.ObjectMeta
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Secrets = nil
+ } else {
+ yyv13 := &x.Secrets
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceObjectReference((*[]ObjectReference)(yyv13), d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv15 := &x.ImagePullSecrets
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv15), d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceServiceAccount((*[]ServiceAccount)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceServiceAccount((*[]ServiceAccount)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Subsets == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Subsets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Subsets == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "Subsets":
+ if r.TryDecodeAsNil() {
+ x.Subsets = nil
+ } else {
+ yyv5 := &x.Subsets
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceEndpointSubset((*[]EndpointSubset)(yyv5), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subsets = nil
+ } else {
+ yyv11 := &x.Subsets
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceEndpointSubset((*[]EndpointSubset)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Addresses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.NotReadyAddresses == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("NotReadyAddresses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NotReadyAddresses == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Addresses":
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv4 := &x.Addresses
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d)
+ }
+ }
+ case "NotReadyAddresses":
+ if r.TryDecodeAsNil() {
+ x.NotReadyAddresses = nil
+ } else {
+ yyv6 := &x.NotReadyAddresses
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d)
+ }
+ }
+ case "Ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv8 := &x.Ports
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv11 := &x.Addresses
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NotReadyAddresses = nil
+ } else {
+ yyv13 := &x.NotReadyAddresses
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv15 := &x.Ports
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Hostname != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("IP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostname"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.TargetRef == nil {
+ r.EncodeNil()
+ } else {
+ x.TargetRef.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("TargetRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TargetRef == nil {
+ r.EncodeNil()
+ } else {
+ x.TargetRef.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "IP":
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ case "hostname":
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ case "TargetRef":
+ if r.TryDecodeAsNil() {
+ if x.TargetRef != nil {
+ x.TargetRef = nil
+ }
+ } else {
+ if x.TargetRef == nil {
+ x.TargetRef = new(ObjectReference)
+ }
+ x.TargetRef.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TargetRef != nil {
+ x.TargetRef = nil
+ }
+ } else {
+ if x.TargetRef == nil {
+ x.TargetRef = new(ObjectReference)
+ }
+ x.TargetRef.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Protocol.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Protocol.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "Port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ case "Protocol":
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceEndpoints(([]Endpoints)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEndpoints(([]Endpoints)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceEndpoints((*[]Endpoints)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEndpoints((*[]Endpoints)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
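+ // A minimal usage sketch, kept as a comment since this is vendored generated
+ // code. It assumes the standard "bytes" and "github.com/ugorji/go/codec"
+ // imports and a hypothetical EndpointsList value named list; the JsonHandle
+ // is an arbitrary choice, any codec.Handle works the same way:
+ //
+ //	var h codec.JsonHandle
+ //	buf := new(bytes.Buffer)
+ //	if err := codec.NewEncoder(buf, &h).Encode(&list); err != nil {
+ //		// handle error
+ //	}
+ //	var out EndpointsList
+ //	if err := codec.NewDecoder(buf, &h).Decode(&out); err != nil {
+ //		// handle error
+ //	}
+ //
+ // The encoder and decoder route through the CodecEncodeSelf/CodecDecodeSelf
+ // methods generated in this file because the types implement codec.Selfer.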
+func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.PodCIDR != ""
+ yyq2[1] = x.ExternalID != ""
+ yyq2[2] = x.ProviderID != ""
+ yyq2[3] = x.Unschedulable != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("externalID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("providerID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Unschedulable))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("unschedulable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Unschedulable))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "podCIDR":
+ if r.TryDecodeAsNil() {
+ x.PodCIDR = ""
+ } else {
+ x.PodCIDR = string(r.DecodeString())
+ }
+ case "externalID":
+ if r.TryDecodeAsNil() {
+ x.ExternalID = ""
+ } else {
+ x.ExternalID = string(r.DecodeString())
+ }
+ case "providerID":
+ if r.TryDecodeAsNil() {
+ x.ProviderID = ""
+ } else {
+ x.ProviderID = string(r.DecodeString())
+ }
+ case "unschedulable":
+ if r.TryDecodeAsNil() {
+ x.Unschedulable = false
+ } else {
+ x.Unschedulable = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodCIDR = ""
+ } else {
+ x.PodCIDR = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExternalID = ""
+ } else {
+ x.ExternalID = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ProviderID = ""
+ } else {
+ x.ProviderID = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Unschedulable = false
+ } else {
+ x.Unschedulable = bool(r.DecodeBool())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.KubeletEndpoint
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.KubeletEndpoint
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kubeletEndpoint":
+ if r.TryDecodeAsNil() {
+ x.KubeletEndpoint = DaemonEndpoint{}
+ } else {
+ yyv4 := &x.KubeletEndpoint
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeletEndpoint = DaemonEndpoint{}
+ } else {
+ yyv6 := &x.KubeletEndpoint
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [10]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(10)
+ } else {
+ yynn2 = 10
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MachineID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("machineID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MachineID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("systemUUID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.BootID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("bootID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.BootID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kernelVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OSImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("osImage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OSImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operatingSystem"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Architecture))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("architecture"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Architecture))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "machineID":
+ if r.TryDecodeAsNil() {
+ x.MachineID = ""
+ } else {
+ x.MachineID = string(r.DecodeString())
+ }
+ case "systemUUID":
+ if r.TryDecodeAsNil() {
+ x.SystemUUID = ""
+ } else {
+ x.SystemUUID = string(r.DecodeString())
+ }
+ case "bootID":
+ if r.TryDecodeAsNil() {
+ x.BootID = ""
+ } else {
+ x.BootID = string(r.DecodeString())
+ }
+ case "kernelVersion":
+ if r.TryDecodeAsNil() {
+ x.KernelVersion = ""
+ } else {
+ x.KernelVersion = string(r.DecodeString())
+ }
+ case "osImage":
+ if r.TryDecodeAsNil() {
+ x.OSImage = ""
+ } else {
+ x.OSImage = string(r.DecodeString())
+ }
+ case "containerRuntimeVersion":
+ if r.TryDecodeAsNil() {
+ x.ContainerRuntimeVersion = ""
+ } else {
+ x.ContainerRuntimeVersion = string(r.DecodeString())
+ }
+ case "kubeletVersion":
+ if r.TryDecodeAsNil() {
+ x.KubeletVersion = ""
+ } else {
+ x.KubeletVersion = string(r.DecodeString())
+ }
+ case "kubeProxyVersion":
+ if r.TryDecodeAsNil() {
+ x.KubeProxyVersion = ""
+ } else {
+ x.KubeProxyVersion = string(r.DecodeString())
+ }
+ case "operatingSystem":
+ if r.TryDecodeAsNil() {
+ x.OperatingSystem = ""
+ } else {
+ x.OperatingSystem = string(r.DecodeString())
+ }
+ case "architecture":
+ if r.TryDecodeAsNil() {
+ x.Architecture = ""
+ } else {
+ x.Architecture = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MachineID = ""
+ } else {
+ x.MachineID = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SystemUUID = ""
+ } else {
+ x.SystemUUID = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.BootID = ""
+ } else {
+ x.BootID = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KernelVersion = ""
+ } else {
+ x.KernelVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OSImage = ""
+ } else {
+ x.OSImage = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerRuntimeVersion = ""
+ } else {
+ x.ContainerRuntimeVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeletVersion = ""
+ } else {
+ x.KubeletVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeProxyVersion = ""
+ } else {
+ x.KubeProxyVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OperatingSystem = ""
+ } else {
+ x.OperatingSystem = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Architecture = ""
+ } else {
+ x.Architecture = string(r.DecodeString())
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
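+ // NodeStatus has several omitempty-style fields, so its encoder mirrors the
+ // pattern above with one addition: the yyq2 bitmap records which optional
+ // fields are actually set. In map form only those keys are emitted (yynn2
+ // counts them for EncodeMapStart), while array form always writes all ten
+ // positions, encoding unset slots as nil or the zero value.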
+func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [10]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Capacity) != 0
+ yyq2[1] = len(x.Allocatable) != 0
+ yyq2[2] = x.Phase != ""
+ yyq2[3] = len(x.Conditions) != 0
+ yyq2[4] = len(x.Addresses) != 0
+ yyq2[5] = true
+ yyq2[6] = true
+ yyq2[7] = len(x.Images) != 0
+ yyq2[8] = len(x.VolumesInUse) != 0
+ yyq2[9] = len(x.VolumesAttached) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(10)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Allocatable == nil {
+ r.EncodeNil()
+ } else {
+ x.Allocatable.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allocatable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Allocatable == nil {
+ r.EncodeNil()
+ } else {
+ x.Allocatable.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("addresses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yy19 := &x.DaemonEndpoints
+ yy19.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy21 := &x.DaemonEndpoints
+ yy21.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yy24 := &x.NodeInfo
+ yy24.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeInfo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy26 := &x.NodeInfo
+ yy26.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.Images == nil {
+ r.EncodeNil()
+ } else {
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("images"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Images == nil {
+ r.EncodeNil()
+ } else {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.VolumesInUse == nil {
+ r.EncodeNil()
+ } else {
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumesInUse"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumesInUse == nil {
+ r.EncodeNil()
+ } else {
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.VolumesAttached == nil {
+ r.EncodeNil()
+ } else {
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumesAttached"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumesAttached == nil {
+ r.EncodeNil()
+ } else {
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv4 := &x.Capacity
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "allocatable":
+ if r.TryDecodeAsNil() {
+ x.Allocatable = nil
+ } else {
+ yyv5 := &x.Allocatable
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NodePhase(r.DecodeString())
+ }
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv7 := &x.Conditions
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d)
+ }
+ }
+ case "addresses":
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv9 := &x.Addresses
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d)
+ }
+ }
+ case "daemonEndpoints":
+ if r.TryDecodeAsNil() {
+ x.DaemonEndpoints = NodeDaemonEndpoints{}
+ } else {
+ yyv11 := &x.DaemonEndpoints
+ yyv11.CodecDecodeSelf(d)
+ }
+ case "nodeInfo":
+ if r.TryDecodeAsNil() {
+ x.NodeInfo = NodeSystemInfo{}
+ } else {
+ yyv12 := &x.NodeInfo
+ yyv12.CodecDecodeSelf(d)
+ }
+ case "images":
+ if r.TryDecodeAsNil() {
+ x.Images = nil
+ } else {
+ yyv13 := &x.Images
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceContainerImage((*[]ContainerImage)(yyv13), d)
+ }
+ }
+ case "volumesInUse":
+ if r.TryDecodeAsNil() {
+ x.VolumesInUse = nil
+ } else {
+ yyv15 := &x.VolumesInUse
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d)
+ }
+ }
+ case "volumesAttached":
+ if r.TryDecodeAsNil() {
+ x.VolumesAttached = nil
+ } else {
+ yyv17 := &x.VolumesAttached
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj19 int
+ var yyb19 bool
+ var yyhl19 bool = l >= 0
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv20 := &x.Capacity
+ yyv20.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Allocatable = nil
+ } else {
+ yyv21 := &x.Allocatable
+ yyv21.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NodePhase(r.DecodeString())
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv23 := &x.Conditions
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv25 := &x.Addresses
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DaemonEndpoints = NodeDaemonEndpoints{}
+ } else {
+ yyv27 := &x.DaemonEndpoints
+ yyv27.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeInfo = NodeSystemInfo{}
+ } else {
+ yyv28 := &x.NodeInfo
+ yyv28.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Images = nil
+ } else {
+ yyv29 := &x.Images
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.decSliceContainerImage((*[]ContainerImage)(yyv29), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumesInUse = nil
+ } else {
+ yyv31 := &x.VolumesInUse
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumesAttached = nil
+ } else {
+ yyv33 := &x.VolumesAttached
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else {
+ h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d)
+ }
+ }
+ for {
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj19-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
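+ // UniqueVolumeName, NodePhase and NodeConditionType are plain string aliases,
+ // so their generated codecs skip the container handling entirely: unless an
+ // extension claims the type, the value is written and read as a UTF-8 string.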
+func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Name.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Name.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("devicePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = UniqueVolumeName(r.DecodeString())
+ }
+ case "devicePath":
+ if r.TryDecodeAsNil() {
+ x.DevicePath = ""
+ } else {
+ x.DevicePath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = UniqueVolumeName(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DevicePath = ""
+ } else {
+ x.DevicePath = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.SizeBytes != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Names == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Names, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("names"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Names == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Names, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SizeBytes))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("sizeBytes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SizeBytes))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "names":
+ if r.TryDecodeAsNil() {
+ x.Names = nil
+ } else {
+ yyv4 := &x.Names
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "sizeBytes":
+ if r.TryDecodeAsNil() {
+ x.SizeBytes = 0
+ } else {
+ x.SizeBytes = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Names = nil
+ } else {
+ yyv8 := &x.Names
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SizeBytes = 0
+ } else {
+ x.SizeBytes = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
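+// CodecEncodeSelf encodes NodeCondition either as an array (when the handle's
+// StructToArray option is set, tracked in yy2arr2) or as a map. The yyq2
+// presence array marks optional fields: LastHeartbeatTime and LastTransitionTime
+// are always written, while Reason and Message are emitted only when non-empty.
+// Time values fall through EncBinaryMarshal / EncJSONMarshal / EncFallback
+// depending on the active handle.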
+func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Status.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Status.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastHeartbeatTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastHeartbeatTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ case "lastHeartbeatTime":
+ if r.TryDecodeAsNil() {
+ x.LastHeartbeatTime = pkg2_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastHeartbeatTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastHeartbeatTime = pkg2_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastHeartbeatTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("address"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeAddressType(r.DecodeString())
+ }
+ case "address":
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeAddressType(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeResources) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Capacity) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeResources) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeResources) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv4 := &x.Capacity
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeResources) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv6 := &x.Capacity
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ h.encResourceList((ResourceList)(x), e)
+ }
+ }
+}
+
+func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ h.decResourceList((*ResourceList)(x), d)
+ }
+}
+
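+// ResourceName and ResourceList above are thin wrappers: the string-backed
+// ResourceName round-trips as a plain UTF-8 string, and the map-typed
+// ResourceList is delegated to h.encResourceList / h.decResourceList, which
+// are presumably emitted further down in this generated file.
+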
+func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = NodeSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = NodeStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = NodeSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = NodeStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
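+// Note the trailing loop in codecDecodeSelfFromArray above: array elements
+// beyond the five fields known to Node are drained through
+// DecStructFieldNotFound, so decoding tolerates a wire struct that carries
+// extra trailing fields.
+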
+func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNode(([]Node)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNode(([]Node)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNode((*[]Node)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceNode((*[]Node)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
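+// In the list encoders (NodeList above, NamespaceList below) Items is treated
+// as required: it is written without a presence check and yynn2 starts at 1 to
+// account for it, whereas metadata, kind and apiVersion are only counted when
+// their yyq2 flags are set.
+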
+func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Finalizers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Finalizers":
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv4 := &x.Finalizers
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv7 := &x.Finalizers
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
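+// NamespaceSpec is the odd one out for key naming: its only field is encoded
+// and matched under "Finalizers" (the Go field name) rather than a lowercase
+// JSON-style key, and the string switch in codecDecodeSelfFromMap matches it
+// case-sensitively.
+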
+func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NamespacePhase(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NamespacePhase(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = NamespaceSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = NamespaceStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = NamespaceSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = NamespaceStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNamespace(([]Namespace)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNamespace(([]Namespace)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNamespace((*[]Namespace)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceNamespace((*[]Namespace)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
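+// Binding: the nested ObjectMeta and Target (ObjectReference) fields are
+// encoded and decoded via their own generated CodecEncodeSelf/CodecDecodeSelf
+// methods rather than the reflection fallback; kind and apiVersion are written
+// only when non-empty.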
+func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.Target
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("target"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Target
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "target":
+ if r.TryDecodeAsNil() {
+ x.Target = ObjectReference{}
+ } else {
+ yyv5 := &x.Target
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Target = ObjectReference{}
+ } else {
+ yyv10 := &x.Target
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
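+// Preconditions: the only field is an optional *UID, so the encoder skips it
+// (map mode) or writes nil (array mode) when unset, and the decoder allocates
+// the pointer lazily before filling it from the stream.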
+func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.UID != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.UID == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.UID
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.UID == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.UID
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "uid":
+ if r.TryDecodeAsNil() {
+ if x.UID != nil {
+ x.UID = nil
+ }
+ } else {
+ if x.UID == nil {
+ x.UID = new(pkg1_types.UID)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.UID) {
+ } else {
+ *((*string)(x.UID)) = r.DecodeString()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.UID != nil {
+ x.UID = nil
+ }
+ } else {
+ if x.UID == nil {
+ x.UID = new(pkg1_types.UID)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.UID) {
+ } else {
+ *((*string)(x.UID)) = r.DecodeString()
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
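+// DeleteOptions: GracePeriodSeconds (*int64), Preconditions (*Preconditions)
+// and OrphanDependents (*bool) are pointer fields that are emitted only when
+// non-nil and allocated lazily on decode; kind and apiVersion are emitted only
+// when non-empty.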
+func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.GracePeriodSeconds != nil
+ yyq2[1] = x.Preconditions != nil
+ yyq2[2] = x.OrphanDependents != nil
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.GracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.GracePeriodSeconds
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.GracePeriodSeconds
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Preconditions == nil {
+ r.EncodeNil()
+ } else {
+ x.Preconditions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preconditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Preconditions == nil {
+ r.EncodeNil()
+ } else {
+ x.Preconditions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.OrphanDependents == nil {
+ r.EncodeNil()
+ } else {
+ yy12 := *x.OrphanDependents
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(yy12))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("orphanDependents"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.OrphanDependents == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.OrphanDependents
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeBool(bool(yy14))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "gracePeriodSeconds":
+ if r.TryDecodeAsNil() {
+ if x.GracePeriodSeconds != nil {
+ x.GracePeriodSeconds = nil
+ }
+ } else {
+ if x.GracePeriodSeconds == nil {
+ x.GracePeriodSeconds = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "preconditions":
+ if r.TryDecodeAsNil() {
+ if x.Preconditions != nil {
+ x.Preconditions = nil
+ }
+ } else {
+ if x.Preconditions == nil {
+ x.Preconditions = new(Preconditions)
+ }
+ x.Preconditions.CodecDecodeSelf(d)
+ }
+ case "orphanDependents":
+ if r.TryDecodeAsNil() {
+ if x.OrphanDependents != nil {
+ x.OrphanDependents = nil
+ }
+ } else {
+ if x.OrphanDependents == nil {
+ x.OrphanDependents = new(bool)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ *((*bool)(x.OrphanDependents)) = r.DecodeBool()
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GracePeriodSeconds != nil {
+ x.GracePeriodSeconds = nil
+ }
+ } else {
+ if x.GracePeriodSeconds == nil {
+ x.GracePeriodSeconds = new(int64)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Preconditions != nil {
+ x.Preconditions = nil
+ }
+ } else {
+ if x.Preconditions == nil {
+ x.Preconditions = new(Preconditions)
+ }
+ x.Preconditions.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.OrphanDependents != nil {
+ x.OrphanDependents = nil
+ }
+ } else {
+ if x.OrphanDependents == nil {
+ x.OrphanDependents = new(bool)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*bool)(x.OrphanDependents)) = r.DecodeBool()
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
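+// ExportOptions: the export and exact booleans are always written (the map
+// length count starts at 2), while kind and apiVersion are conditional on
+// being non-empty.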
+func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Export))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("export"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Export))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Exact))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exact"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Exact))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "export":
+ if r.TryDecodeAsNil() {
+ x.Export = false
+ } else {
+ x.Export = bool(r.DecodeBool())
+ }
+ case "exact":
+ if r.TryDecodeAsNil() {
+ x.Exact = false
+ } else {
+ x.Exact = bool(r.DecodeBool())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Export = false
+ } else {
+ x.Export = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Exact = false
+ } else {
+ x.Exact = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
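+// ListOptions: the map keys are the Go field names (LabelSelector,
+// FieldSelector, Watch, ResourceVersion, TimeoutSeconds) rather than
+// lower-camel JSON tags, which suggests this is the internal, untagged API
+// struct; the selector fields round-trip through EncFallback/DecFallback.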
+func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[5] = x.Kind != ""
+ yyq2[6] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 5
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.LabelSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LabelSelector) {
+ } else {
+ z.EncFallback(x.LabelSelector)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("LabelSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LabelSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LabelSelector) {
+ } else {
+ z.EncFallback(x.LabelSelector)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.FieldSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.FieldSelector) {
+ } else {
+ z.EncFallback(x.FieldSelector)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("FieldSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FieldSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.FieldSelector) {
+ } else {
+ z.EncFallback(x.FieldSelector)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Watch))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Watch"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Watch))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ResourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.TimeoutSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.TimeoutSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("TimeoutSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TimeoutSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.TimeoutSeconds
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "LabelSelector":
+ if r.TryDecodeAsNil() {
+ x.LabelSelector = nil
+ } else {
+ yyv4 := &x.LabelSelector
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, true)
+ }
+ }
+ case "FieldSelector":
+ if r.TryDecodeAsNil() {
+ x.FieldSelector = nil
+ } else {
+ yyv6 := &x.FieldSelector
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else {
+ z.DecFallback(yyv6, true)
+ }
+ }
+ case "Watch":
+ if r.TryDecodeAsNil() {
+ x.Watch = false
+ } else {
+ x.Watch = bool(r.DecodeBool())
+ }
+ case "ResourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "TimeoutSeconds":
+ if r.TryDecodeAsNil() {
+ if x.TimeoutSeconds != nil {
+ x.TimeoutSeconds = nil
+ }
+ } else {
+ if x.TimeoutSeconds == nil {
+ x.TimeoutSeconds = new(int64)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LabelSelector = nil
+ } else {
+ yyv15 := &x.LabelSelector
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else {
+ z.DecFallback(yyv15, true)
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldSelector = nil
+ } else {
+ yyv17 := &x.FieldSelector
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else {
+ z.DecFallback(yyv17, true)
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Watch = false
+ } else {
+ x.Watch = bool(r.DecodeBool())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TimeoutSeconds != nil {
+ x.TimeoutSeconds = nil
+ }
+ } else {
+ if x.TimeoutSeconds == nil {
+ x.TimeoutSeconds = new(int64)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
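+// PodLogOptions: SinceTime (*unversioned.Time) is special-cased to use binary
+// or JSON (un)marshalling depending on the active handle, while the *int64
+// fields (SinceSeconds, TailLines, LimitBytes) follow the usual nil-check and
+// lazy-allocation pattern.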
+func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [10]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[8] = x.Kind != ""
+ yyq2[9] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(10)
+ } else {
+ yynn2 = 8
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Container"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Follow))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Follow"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Follow))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Previous))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Previous"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Previous))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.SinceSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy13 := *x.SinceSeconds
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(yy13))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("SinceSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SinceSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.SinceSeconds
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(yy15))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.SinceTime == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.SinceTime) {
+ } else if yym18 {
+ z.EncBinaryMarshal(x.SinceTime)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.SinceTime)
+ } else {
+ z.EncFallback(x.SinceTime)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("SinceTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SinceTime == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.SinceTime) {
+ } else if yym19 {
+ z.EncBinaryMarshal(x.SinceTime)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.SinceTime)
+ } else {
+ z.EncFallback(x.SinceTime)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Timestamps))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Timestamps"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Timestamps))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.TailLines == nil {
+ r.EncodeNil()
+ } else {
+ yy24 := *x.TailLines
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeInt(int64(yy24))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("TailLines"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TailLines == nil {
+ r.EncodeNil()
+ } else {
+ yy26 := *x.TailLines
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ r.EncodeInt(int64(yy26))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.LimitBytes == nil {
+ r.EncodeNil()
+ } else {
+ yy29 := *x.LimitBytes
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeInt(int64(yy29))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("LimitBytes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LimitBytes == nil {
+ r.EncodeNil()
+ } else {
+ yy31 := *x.LimitBytes
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeInt(int64(yy31))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ yym37 := z.EncBinary()
+ _ = yym37
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Container":
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ case "Follow":
+ if r.TryDecodeAsNil() {
+ x.Follow = false
+ } else {
+ x.Follow = bool(r.DecodeBool())
+ }
+ case "Previous":
+ if r.TryDecodeAsNil() {
+ x.Previous = false
+ } else {
+ x.Previous = bool(r.DecodeBool())
+ }
+ case "SinceSeconds":
+ if r.TryDecodeAsNil() {
+ if x.SinceSeconds != nil {
+ x.SinceSeconds = nil
+ }
+ } else {
+ if x.SinceSeconds == nil {
+ x.SinceSeconds = new(int64)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "SinceTime":
+ if r.TryDecodeAsNil() {
+ if x.SinceTime != nil {
+ x.SinceTime = nil
+ }
+ } else {
+ if x.SinceTime == nil {
+ x.SinceTime = new(pkg2_unversioned.Time)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.SinceTime) {
+ } else if yym10 {
+ z.DecBinaryUnmarshal(x.SinceTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.SinceTime)
+ } else {
+ z.DecFallback(x.SinceTime, false)
+ }
+ }
+ case "Timestamps":
+ if r.TryDecodeAsNil() {
+ x.Timestamps = false
+ } else {
+ x.Timestamps = bool(r.DecodeBool())
+ }
+ case "TailLines":
+ if r.TryDecodeAsNil() {
+ if x.TailLines != nil {
+ x.TailLines = nil
+ }
+ } else {
+ if x.TailLines == nil {
+ x.TailLines = new(int64)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*int64)(x.TailLines)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "LimitBytes":
+ if r.TryDecodeAsNil() {
+ if x.LimitBytes != nil {
+ x.LimitBytes = nil
+ }
+ } else {
+ if x.LimitBytes == nil {
+ x.LimitBytes = new(int64)
+ }
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj18 int
+ var yyb18 bool
+ var yyhl18 bool = l >= 0
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Follow = false
+ } else {
+ x.Follow = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Previous = false
+ } else {
+ x.Previous = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SinceSeconds != nil {
+ x.SinceSeconds = nil
+ }
+ } else {
+ if x.SinceSeconds == nil {
+ x.SinceSeconds = new(int64)
+ }
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else {
+ *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SinceTime != nil {
+ x.SinceTime = nil
+ }
+ } else {
+ if x.SinceTime == nil {
+ x.SinceTime = new(pkg2_unversioned.Time)
+ }
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.SinceTime) {
+ } else if yym25 {
+ z.DecBinaryUnmarshal(x.SinceTime)
+ } else if !yym25 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.SinceTime)
+ } else {
+ z.DecFallback(x.SinceTime, false)
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Timestamps = false
+ } else {
+ x.Timestamps = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TailLines != nil {
+ x.TailLines = nil
+ }
+ } else {
+ if x.TailLines == nil {
+ x.TailLines = new(int64)
+ }
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ *((*int64)(x.TailLines)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LimitBytes != nil {
+ x.LimitBytes = nil
+ }
+ } else {
+ if x.LimitBytes == nil {
+ x.LimitBytes = new(int64)
+ }
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj18-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
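+// Codec helpers for PodAttachOptions: the encoder writes either a positional
+// array or a map keyed by JSON field name (stdin, stdout, stderr, tty,
+// container, kind, apiVersion), omitting zero-valued fields in map form.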
+func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Stdin != false
+ yyq2[1] = x.Stdout != false
+ yyq2[2] = x.Stderr != false
+ yyq2[3] = x.TTY != false
+ yyq2[4] = x.Container != ""
+ yyq2[5] = x.Kind != ""
+ yyq2[6] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdin"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stderr"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tty"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("container"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "stdin":
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ case "stdout":
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ case "stderr":
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ case "tty":
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ case "container":
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
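+// Codec helpers for PodExecOptions: Stdin/Stdout/Stderr/TTY/Container/Command
+// are always emitted (note the Go-cased map keys), while kind and apiVersion
+// are written only when non-empty.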
+func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[6] = x.Kind != ""
+ yyq2[7] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 6
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Stdin"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Stdout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Stderr"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("TTY"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Container"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Command"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Stdin":
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ case "Stdout":
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ case "Stderr":
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ case "TTY":
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ case "Container":
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ case "Command":
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv9 := &x.Command
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv9, false, d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv19 := &x.Command
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv19, false, d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
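+// Codec helpers for PodProxyOptions: Path is always encoded; kind and
+// apiVersion only when set. Decoding accepts either a map or an array
+// container.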
+func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
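+// NodeProxyOptions mirrors PodProxyOptions: a required Path plus optional
+// kind/apiVersion, with the same map-or-array encoding.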
+func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
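+// ServiceProxyOptions follows the same Path/kind/apiVersion pattern as the
+// other proxy option types above.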
+func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
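+// Codec helpers for OwnerReference: apiVersion, kind, name and uid are always
+// written; the *bool Controller field is encoded only when non-nil.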
+func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[4] = x.Controller != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Controller == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.Controller
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeBool(bool(yy16))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("controller"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Controller == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.Controller
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(yy18))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ case "controller":
+ if r.TryDecodeAsNil() {
+ if x.Controller != nil {
+ x.Controller = nil
+ }
+ } else {
+ if x.Controller == nil {
+ x.Controller = new(bool)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*bool)(x.Controller)) = r.DecodeBool()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Controller != nil {
+ x.Controller = nil
+ }
+ } else {
+ if x.Controller == nil {
+ x.Controller = new(bool)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*bool)(x.Controller)) = r.DecodeBool()
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
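+// Codec helpers for ObjectReference: every field (kind, namespace, name, uid,
+// apiVersion, resourceVersion, fieldPath) is optional and omitted from the
+// map form when empty.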
+func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Kind != ""
+ yyq2[1] = x.Namespace != ""
+ yyq2[2] = x.Name != ""
+ yyq2[3] = x.UID != ""
+ yyq2[4] = x.APIVersion != ""
+ yyq2[5] = x.ResourceVersion != ""
+ yyq2[6] = x.FieldPath != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "resourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "fieldPath":
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.Reference
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reference"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Reference
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "reference":
+ if r.TryDecodeAsNil() {
+ x.Reference = ObjectReference{}
+ } else {
+ yyv4 := &x.Reference
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reference = ObjectReference{}
+ } else {
+ yyv8 := &x.Reference
+ yyv8.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Component != ""
+ yyq2[1] = x.Host != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Component))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("component"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Component))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("host"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "component":
+ if r.TryDecodeAsNil() {
+ x.Component = ""
+ } else {
+ x.Component = string(r.DecodeString())
+ }
+ case "host":
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Component = ""
+ } else {
+ x.Component = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [11]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Reason != ""
+ yyq2[3] = x.Message != ""
+ yyq2[4] = true
+ yyq2[5] = true
+ yyq2[6] = true
+ yyq2[7] = x.Count != 0
+ yyq2[8] = x.Type != ""
+ yyq2[9] = x.Kind != ""
+ yyq2[10] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(11)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.InvolvedObject
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("involvedObject"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.InvolvedObject
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yy20 := &x.Source
+ yy20.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("source"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy22 := &x.Source
+ yy22.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yy25 := &x.FirstTimestamp
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy25) {
+ } else if yym26 {
+ z.EncBinaryMarshal(yy25)
+ } else if !yym26 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy25)
+ } else {
+ z.EncFallback(yy25)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy27 := &x.FirstTimestamp
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy27) {
+ } else if yym28 {
+ z.EncBinaryMarshal(yy27)
+ } else if !yym28 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy27)
+ } else {
+ z.EncFallback(yy27)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yy30 := &x.LastTimestamp
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy30) {
+ } else if yym31 {
+ z.EncBinaryMarshal(yy30)
+ } else if !yym31 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy30)
+ } else {
+ z.EncFallback(yy30)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy32 := &x.LastTimestamp
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy32) {
+ } else if yym33 {
+ z.EncBinaryMarshal(yy32)
+ } else if !yym33 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy32)
+ } else {
+ z.EncFallback(yy32)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Count))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Count))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "involvedObject":
+ if r.TryDecodeAsNil() {
+ x.InvolvedObject = ObjectReference{}
+ } else {
+ yyv5 := &x.InvolvedObject
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "source":
+ if r.TryDecodeAsNil() {
+ x.Source = EventSource{}
+ } else {
+ yyv8 := &x.Source
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "firstTimestamp":
+ if r.TryDecodeAsNil() {
+ x.FirstTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv9 := &x.FirstTimestamp
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if yym10 {
+ z.DecBinaryUnmarshal(yyv9)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ case "lastTimestamp":
+ if r.TryDecodeAsNil() {
+ x.LastTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv11 := &x.LastTimestamp
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(yyv11)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ case "count":
+ if r.TryDecodeAsNil() {
+ x.Count = 0
+ } else {
+ x.Count = int32(r.DecodeInt(32))
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj17 int
+ var yyb17 bool
+ var yyhl17 bool = l >= 0
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv18 := &x.ObjectMeta
+ yyv18.CodecDecodeSelf(d)
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.InvolvedObject = ObjectReference{}
+ } else {
+ yyv19 := &x.InvolvedObject
+ yyv19.CodecDecodeSelf(d)
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Source = EventSource{}
+ } else {
+ yyv22 := &x.Source
+ yyv22.CodecDecodeSelf(d)
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FirstTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv23 := &x.FirstTimestamp
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv23) {
+ } else if yym24 {
+ z.DecBinaryUnmarshal(yyv23)
+ } else if !yym24 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv23)
+ } else {
+ z.DecFallback(yyv23, false)
+ }
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv25 := &x.LastTimestamp
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv25) {
+ } else if yym26 {
+ z.DecBinaryUnmarshal(yyv25)
+ } else if !yym26 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv25)
+ } else {
+ z.DecFallback(yyv25, false)
+ }
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Count = 0
+ } else {
+ x.Count = int32(r.DecodeInt(32))
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj17-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceEvent(([]Event)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEvent(([]Event)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceEvent((*[]Event)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEvent((*[]Event)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *List) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *List) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Type != ""
+ yyq2[1] = len(x.Max) != 0
+ yyq2[2] = len(x.Min) != 0
+ yyq2[3] = len(x.Default) != 0
+ yyq2[4] = len(x.DefaultRequest) != 0
+ yyq2[5] = len(x.MaxLimitRequestRatio) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Max == nil {
+ r.EncodeNil()
+ } else {
+ x.Max.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("max"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Max == nil {
+ r.EncodeNil()
+ } else {
+ x.Max.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Min == nil {
+ r.EncodeNil()
+ } else {
+ x.Min.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("min"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Min == nil {
+ r.EncodeNil()
+ } else {
+ x.Min.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Default == nil {
+ r.EncodeNil()
+ } else {
+ x.Default.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("default"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Default == nil {
+ r.EncodeNil()
+ } else {
+ x.Default.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.DefaultRequest == nil {
+ r.EncodeNil()
+ } else {
+ x.DefaultRequest.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("defaultRequest"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DefaultRequest == nil {
+ r.EncodeNil()
+ } else {
+ x.DefaultRequest.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.MaxLimitRequestRatio == nil {
+ r.EncodeNil()
+ } else {
+ x.MaxLimitRequestRatio.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MaxLimitRequestRatio == nil {
+ r.EncodeNil()
+ } else {
+ x.MaxLimitRequestRatio.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = LimitType(r.DecodeString())
+ }
+ case "max":
+ if r.TryDecodeAsNil() {
+ x.Max = nil
+ } else {
+ yyv5 := &x.Max
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "min":
+ if r.TryDecodeAsNil() {
+ x.Min = nil
+ } else {
+ yyv6 := &x.Min
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "default":
+ if r.TryDecodeAsNil() {
+ x.Default = nil
+ } else {
+ yyv7 := &x.Default
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "defaultRequest":
+ if r.TryDecodeAsNil() {
+ x.DefaultRequest = nil
+ } else {
+ yyv8 := &x.DefaultRequest
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "maxLimitRequestRatio":
+ if r.TryDecodeAsNil() {
+ x.MaxLimitRequestRatio = nil
+ } else {
+ yyv9 := &x.MaxLimitRequestRatio
+ yyv9.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = LimitType(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Max = nil
+ } else {
+ yyv12 := &x.Max
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Min = nil
+ } else {
+ yyv13 := &x.Min
+ yyv13.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Default = nil
+ } else {
+ yyv14 := &x.Default
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DefaultRequest = nil
+ } else {
+ yyv15 := &x.DefaultRequest
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxLimitRequestRatio = nil
+ } else {
+ yyv16 := &x.MaxLimitRequestRatio
+ yyv16.CodecDecodeSelf(d)
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("limits"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "limits":
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv4 := &x.Limits
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv7 := &x.Limits
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = LimitRangeSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = LimitRangeSpec{}
+ } else {
+ yyv10 := &x.Spec
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceLimitRange(([]LimitRange)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceLimitRange(([]LimitRange)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceLimitRange((*[]LimitRange)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceLimitRange((*[]LimitRange)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Hard) != 0
+ yyq2[1] = len(x.Scopes) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hard"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Scopes == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scopes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Scopes == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hard":
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv4 := &x.Hard
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "scopes":
+ if r.TryDecodeAsNil() {
+ x.Scopes = nil
+ } else {
+ yyv5 := &x.Scopes
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv8 := &x.Hard
+ yyv8.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Scopes = nil
+ } else {
+ yyv9 := &x.Scopes
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Hard) != 0
+ yyq2[1] = len(x.Used) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hard"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Used == nil {
+ r.EncodeNil()
+ } else {
+ x.Used.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("used"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Used == nil {
+ r.EncodeNil()
+ } else {
+ x.Used.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hard":
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv4 := &x.Hard
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "used":
+ if r.TryDecodeAsNil() {
+ x.Used = nil
+ } else {
+ yyv5 := &x.Used
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv7 := &x.Hard
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Used = nil
+ } else {
+ yyv8 := &x.Used
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ResourceQuotaSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ResourceQuotaStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ResourceQuotaSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ResourceQuotaStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceResourceQuota((*[]ResourceQuota)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceResourceQuota((*[]ResourceQuota)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Data) != 0
+ yyq2[2] = x.Type != ""
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv5 := &x.Data
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decMapstringSliceuint8((*map[string][]uint8)(yyv5), d)
+ }
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = SecretType(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv12 := &x.Data
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.decMapstringSliceuint8((*map[string][]uint8)(yyv12), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = SecretType(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceSecret(([]Secret)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceSecret(([]Secret)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceSecret((*[]Secret)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceSecret((*[]Secret)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
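+// ConfigMap follows the same generated pattern: metadata, data, kind and apiVersion
+// are written as an array or a map, with data, kind and apiVersion omitted from the
+// map form when empty.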
+func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Data) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Data, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Data, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv5 := &x.Data
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv5, false, d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv11 := &x.Data
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv11, false, d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceConfigMap(([]ConfigMap)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceConfigMap(([]ConfigMap)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceConfigMap((*[]ConfigMap)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceConfigMap((*[]ConfigMap)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
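+// PatchType and ComponentConditionType are simple string types and reuse the same
+// string-based self-encoders/decoders as SecretType above.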
+func (x PatchType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PatchType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
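+// ComponentCondition always emits its type and status fields, while message and
+// error are optional and only appear in map form when non-empty.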
+func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Message != ""
+ yyq2[3] = x.Error != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Status.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Status.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Error))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("error"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Error))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ComponentConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "error":
+ if r.TryDecodeAsNil() {
+ x.Error = ""
+ } else {
+ x.Error = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ComponentConditionType(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Error = ""
+ } else {
+ x.Error = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Conditions) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv5 := &x.Conditions
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceComponentCondition((*[]ComponentCondition)(yyv5), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv11 := &x.Conditions
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceComponentCondition((*[]ComponentCondition)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceComponentStatus((*[]ComponentStatus)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceComponentStatus((*[]ComponentStatus)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
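+// SecurityContext holds only pointer fields; each is encoded as nil when unset and
+// omitted from the map form, and the decoders allocate the pointer on demand before
+// reading the value.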
+func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Capabilities != nil
+ yyq2[1] = x.Privileged != nil
+ yyq2[2] = x.SELinuxOptions != nil
+ yyq2[3] = x.RunAsUser != nil
+ yyq2[4] = x.RunAsNonRoot != nil
+ yyq2[5] = x.ReadOnlyRootFilesystem != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Capabilities == nil {
+ r.EncodeNil()
+ } else {
+ x.Capabilities.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capabilities == nil {
+ r.EncodeNil()
+ } else {
+ x.Capabilities.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Privileged == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.Privileged
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(yy7))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("privileged"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Privileged == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Privileged
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(yy9))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.RunAsUser
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(yy15))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsUser"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy17 := *x.RunAsUser
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeInt(int64(yy17))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.RunAsNonRoot
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeBool(bool(yy20))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.RunAsNonRoot
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(yy22))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.ReadOnlyRootFilesystem == nil {
+ r.EncodeNil()
+ } else {
+ yy25 := *x.ReadOnlyRootFilesystem
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeBool(bool(yy25))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ReadOnlyRootFilesystem == nil {
+ r.EncodeNil()
+ } else {
+ yy27 := *x.ReadOnlyRootFilesystem
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeBool(bool(yy27))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capabilities":
+ if r.TryDecodeAsNil() {
+ if x.Capabilities != nil {
+ x.Capabilities = nil
+ }
+ } else {
+ if x.Capabilities == nil {
+ x.Capabilities = new(Capabilities)
+ }
+ x.Capabilities.CodecDecodeSelf(d)
+ }
+ case "privileged":
+ if r.TryDecodeAsNil() {
+ if x.Privileged != nil {
+ x.Privileged = nil
+ }
+ } else {
+ if x.Privileged == nil {
+ x.Privileged = new(bool)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*bool)(x.Privileged)) = r.DecodeBool()
+ }
+ }
+ case "seLinuxOptions":
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ case "runAsUser":
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "runAsNonRoot":
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ case "readOnlyRootFilesystem":
+ if r.TryDecodeAsNil() {
+ if x.ReadOnlyRootFilesystem != nil {
+ x.ReadOnlyRootFilesystem = nil
+ }
+ } else {
+ if x.ReadOnlyRootFilesystem == nil {
+ x.ReadOnlyRootFilesystem = new(bool)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Capabilities != nil {
+ x.Capabilities = nil
+ }
+ } else {
+ if x.Capabilities == nil {
+ x.Capabilities = new(Capabilities)
+ }
+ x.Capabilities.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Privileged != nil {
+ x.Privileged = nil
+ }
+ } else {
+ if x.Privileged == nil {
+ x.Privileged = new(bool)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*bool)(x.Privileged)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ReadOnlyRootFilesystem != nil {
+ x.ReadOnlyRootFilesystem = nil
+ }
+ } else {
+ if x.ReadOnlyRootFilesystem == nil {
+ x.ReadOnlyRootFilesystem = new(bool)
+ }
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool()
+ }
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
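+// SELinuxOptions encodes its user, role, type and level strings, skipping empty
+// values in map form and writing empty placeholders in array form.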
+func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.User != ""
+ yyq2[1] = x.Role != ""
+ yyq2[2] = x.Type != ""
+ yyq2[3] = x.Level != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Role))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("role"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Role))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Level))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("level"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Level))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ case "role":
+ if r.TryDecodeAsNil() {
+ x.Role = ""
+ } else {
+ x.Role = string(r.DecodeString())
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ case "level":
+ if r.TryDecodeAsNil() {
+ x.Level = ""
+ } else {
+ x.Level = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Role = ""
+ } else {
+ x.Role = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Level = ""
+ } else {
+ x.Level = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Range))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("range"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Range))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "range":
+ if r.TryDecodeAsNil() {
+ x.Range = ""
+ } else {
+ x.Range = string(r.DecodeString())
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv6 := &x.Data
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Range = ""
+ } else {
+ x.Range = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv13 := &x.Data
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *yyv13 = r.DecodeBytes(*(*[]byte)(yyv13), false, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []OwnerReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]OwnerReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]OwnerReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = OwnerReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, OwnerReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = OwnerReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, OwnerReference{}) // var yyz1 OwnerReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = OwnerReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []OwnerReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PersistentVolumeAccessMode{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PersistentVolumeAccessMode, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PersistentVolumeAccessMode, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PersistentVolumeAccessMode{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PersistentVolume{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 456)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PersistentVolume, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PersistentVolume, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolume{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PersistentVolume{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolume{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolume{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PersistentVolume{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PersistentVolumeClaim{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PersistentVolumeClaim, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PersistentVolumeClaim, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolumeClaim{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PersistentVolumeClaim{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolumeClaim{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolumeClaim{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PersistentVolumeClaim{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []KeyToPath{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]KeyToPath, yyrl1)
+ }
+ } else {
+ yyv1 = make([]KeyToPath, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = KeyToPath{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, KeyToPath{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = KeyToPath{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = KeyToPath{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []KeyToPath{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []DownwardAPIVolumeFile{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]DownwardAPIVolumeFile, yyrl1)
+ }
+ } else {
+ yyv1 = make([]DownwardAPIVolumeFile, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DownwardAPIVolumeFile{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, DownwardAPIVolumeFile{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DownwardAPIVolumeFile{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DownwardAPIVolumeFile{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []DownwardAPIVolumeFile{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HTTPHeader{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HTTPHeader, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HTTPHeader, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPHeader{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HTTPHeader{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPHeader{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPHeader{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HTTPHeader{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Capability{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Capability, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Capability, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = Capability(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = Capability(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 Capability
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = Capability(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Capability{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ContainerPort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ContainerPort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ContainerPort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerPort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ContainerPort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerPort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerPort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ContainerPort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EnvVar{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EnvVar, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EnvVar, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EnvVar{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EnvVar{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EnvVar{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EnvVar{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EnvVar{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []VolumeMount{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]VolumeMount, yyrl1)
+ }
+ } else {
+ yyv1 = make([]VolumeMount, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = VolumeMount{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, VolumeMount{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = VolumeMount{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = VolumeMount{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []VolumeMount{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Pod{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 624)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Pod, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Pod, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Pod{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Pod{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Pod{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Pod{}) // var yyz1 Pod
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Pod{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Pod{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeSelectorTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeSelectorTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeSelectorTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeSelectorTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeSelectorTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeSelectorRequirement{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeSelectorRequirement, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeSelectorRequirement, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorRequirement{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeSelectorRequirement{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorRequirement{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorRequirement{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeSelectorRequirement{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodAffinityTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodAffinityTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodAffinityTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodAffinityTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodAffinityTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodAffinityTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodAffinityTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodAffinityTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []WeightedPodAffinityTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]WeightedPodAffinityTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]WeightedPodAffinityTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = WeightedPodAffinityTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, WeightedPodAffinityTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = WeightedPodAffinityTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = WeightedPodAffinityTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []WeightedPodAffinityTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PreferredSchedulingTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PreferredSchedulingTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PreferredSchedulingTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PreferredSchedulingTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PreferredSchedulingTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PreferredSchedulingTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PreferredSchedulingTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PreferredSchedulingTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Volume{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 176)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Volume, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Volume, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Volume{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Volume{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Volume{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Volume{}) // var yyz1 Volume
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Volume{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Volume{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Container{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 256)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Container, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Container, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Container{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Container{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Container{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Container{}) // var yyz1 Container
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Container{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Container{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LocalObjectReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LocalObjectReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LocalObjectReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LocalObjectReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LocalObjectReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LocalObjectReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LocalObjectReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LocalObjectReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ContainerStatus{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ContainerStatus, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ContainerStatus, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerStatus{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ContainerStatus{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerStatus{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerStatus{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ContainerStatus{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodTemplate{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodTemplate, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodTemplate, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodTemplate{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodTemplate{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodTemplate{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodTemplate{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodTemplate{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ReplicationController{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ReplicationController, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ReplicationController, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicationController{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ReplicationController{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicationController{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicationController{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ReplicationController{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Service{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 408)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Service, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Service, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Service{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Service{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Service{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Service{}) // var yyz1 Service
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Service{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Service{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LoadBalancerIngress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LoadBalancerIngress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LoadBalancerIngress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LoadBalancerIngress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LoadBalancerIngress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LoadBalancerIngress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LoadBalancerIngress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LoadBalancerIngress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ServicePort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ServicePort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ServicePort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServicePort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ServicePort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServicePort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServicePort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ServicePort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ObjectReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ObjectReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ObjectReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ObjectReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ObjectReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ObjectReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ObjectReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ObjectReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ServiceAccount{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ServiceAccount, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ServiceAccount, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServiceAccount{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ServiceAccount{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServiceAccount{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServiceAccount{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ServiceAccount{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EndpointSubset{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EndpointSubset, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EndpointSubset, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointSubset{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EndpointSubset{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointSubset{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointSubset{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EndpointSubset{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EndpointAddress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EndpointAddress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EndpointAddress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointAddress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EndpointAddress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointAddress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointAddress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EndpointAddress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EndpointPort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EndpointPort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EndpointPort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointPort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EndpointPort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointPort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointPort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EndpointPort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Endpoints{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Endpoints, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Endpoints, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Endpoints{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Endpoints{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Endpoints{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Endpoints{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Endpoints{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeAddress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeAddress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeAddress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeAddress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeAddress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeAddress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeAddress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeAddress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ContainerImage{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ContainerImage, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ContainerImage, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerImage{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ContainerImage{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerImage{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerImage{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ContainerImage{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []UniqueVolumeName{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]UniqueVolumeName, yyrl1)
+ }
+ } else {
+ yyv1 = make([]UniqueVolumeName, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []UniqueVolumeName{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []AttachedVolume{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]AttachedVolume, yyrl1)
+ }
+ } else {
+ yyv1 = make([]AttachedVolume, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = AttachedVolume{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, AttachedVolume{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = AttachedVolume{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = AttachedVolume{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []AttachedVolume{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yyk1.CodecEncodeSelf(e)
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy3 := &yyv1
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy3) {
+ } else if !yym4 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy3)
+ } else {
+ z.EncFallback(yy3)
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72)
+ yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 ResourceName
+ var yymv1 pkg3_resource.Quantity
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = ResourceName(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = pkg3_resource.Quantity{}
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = pkg3_resource.Quantity{}
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv3) {
+ } else if !yym4 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv3)
+ } else {
+ z.DecFallback(yyv3, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = ResourceName(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = pkg3_resource.Quantity{}
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = pkg3_resource.Quantity{}
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Node{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 616)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Node, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Node, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Node{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Node{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Node{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Node{}) // var yyz1 Node
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Node{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Node{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []FinalizerName{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]FinalizerName, yyrl1)
+ }
+ } else {
+ yyv1 = make([]FinalizerName, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FinalizerName(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FinalizerName(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 FinalizerName
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FinalizerName(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []FinalizerName{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Namespace{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Namespace, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Namespace, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Namespace{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Namespace{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Namespace{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Namespace{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Namespace{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
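As an aside, generated helpers such as encSliceNamespace/decSliceNamespace are not called directly by application code; the ugorji codec (imported in this file as codec1978) invokes them through the generated CodecEncodeSelf/CodecDecodeSelf methods whenever a []Namespace field such as NamespaceList.Items is serialized. A minimal round-trip sketch, assuming the public github.com/ugorji/go/codec import path and a hypothetical helper name, not part of the vendored file:

package apicodec

import (
	codec "github.com/ugorji/go/codec"

	api "k8s.io/kubernetes/pkg/api"
)

// roundTripNamespaceList encodes a NamespaceList with a JSON handle and decodes
// it back; the generated CodecEncodeSelf/CodecDecodeSelf methods (and, through
// them, the slice helpers above) perform the per-field work.
func roundTripNamespaceList(in api.NamespaceList) (api.NamespaceList, error) {
	var jh codec.JsonHandle
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &jh).Encode(&in); err != nil {
		return api.NamespaceList{}, err
	}
	var out api.NamespaceList
	if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
		return api.NamespaceList{}, err
	}
	return out, nil
}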
+
+func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Event{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Event, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Event, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Event{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Event{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Event{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Event{}) // var yyz1 Event
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Event{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Event{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceruntime_Object(v []pkg7_runtime.Object, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yyv1) {
+ } else {
+ z.EncFallback(yyv1)
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg7_runtime.Object, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg7_runtime.Object{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg7_runtime.Object, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg7_runtime.Object, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = nil
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yym3 := z.DecBinary()
+ _ = yym3
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv2) {
+ } else {
+ z.DecFallback(yyv2, true)
+ }
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, nil)
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = nil
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, true)
+ }
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, nil) // var yyz1 pkg7_runtime.Object
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = nil
+ } else {
+ yyv6 := &yyv1[yyj1]
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else {
+ z.DecFallback(yyv6, true)
+ }
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg7_runtime.Object{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LimitRangeItem{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LimitRangeItem, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LimitRangeItem, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRangeItem{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LimitRangeItem{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRangeItem{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRangeItem{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LimitRangeItem{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LimitRange{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LimitRange, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LimitRange, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRange{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LimitRange{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRange{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRange{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LimitRange{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ResourceQuotaScope{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ResourceQuotaScope, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ResourceQuotaScope, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ResourceQuotaScope{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ResourceQuota{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ResourceQuota, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ResourceQuota, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ResourceQuota{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ResourceQuota{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ResourceQuota{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ResourceQuota{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ResourceQuota{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyk1))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1))
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40)
+ yyv1 = make(map[string][]uint8, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 string
+ var yymv1 []uint8
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else {
+ *yyv3 = r.DecodeBytes(*(*[]byte)(yyv3), false, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeUint(uint64(yyv1))
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []uint8{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]uint8, yyrl1)
+ }
+ } else {
+ yyv1 = make([]uint8, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = 0
+ } else {
+ yyv1[yyj1] = uint8(r.DecodeUint(8))
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, 0)
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = 0
+ } else {
+ yyv1[yyj1] = uint8(r.DecodeUint(8))
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, 0) // var yyz1 uint8
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = 0
+ } else {
+ yyv1[yyj1] = uint8(r.DecodeUint(8))
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []uint8{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Secret{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Secret, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Secret, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Secret{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Secret{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Secret{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Secret{}) // var yyz1 Secret
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Secret{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Secret{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ConfigMap{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 248)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ConfigMap, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ConfigMap, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ConfigMap{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ConfigMap{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ConfigMap{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ConfigMap{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ConfigMap{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ComponentCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ComponentCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ComponentCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ComponentCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ComponentCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ComponentStatus{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ComponentStatus, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ComponentStatus, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentStatus{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ComponentStatus{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentStatus{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentStatus{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ComponentStatus{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.go
new file mode 100644
index 0000000..7225e8f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/types.go
@@ -0,0 +1,2891 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+// Common string formats
+// ---------------------
+// Many fields in this API have formatting requirements. The commonly used
+// formats are defined here.
+//
+// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
+// in the C language. This is captured by the following regex:
+// [A-Za-z_][A-Za-z0-9_]*
+// This defines the format, but not the length restriction, which should be
+// specified at the definition of any field of this type.
+//
+// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
+// to the definition of a "label" in RFCs 1035 and 1123. This is captured
+// by the following regex:
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?
+//
+// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
+// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
+// by the following regex:
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+// or more simply:
+// DNS_LABEL(\.DNS_LABEL)*
+//
+// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
+// conforms to the definition of IANA service name in RFC 6335.
+// It must contain at least one letter [a-z] and it must contain only [a-z0-9-].
+// Hyphens ('-') cannot be the leading or trailing character of the string
+// and cannot be adjacent to other hyphens.
+
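To make the formats above concrete, here is a small validation sketch built from the regexes quoted in the comment, using only the standard regexp package; the package and function names are hypothetical and are not the validators Kubernetes itself uses:

package formats

import "regexp"

var (
	// Regexes copied from the format comments above.
	cIdentifierRE  = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`)
	dnsLabelRE     = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)
	dnsSubdomainRE = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
)

// IsCIdentifier reports whether s is a C_IDENTIFIER.
func IsCIdentifier(s string) bool { return cIdentifierRE.MatchString(s) }

// IsDNSLabel reports whether s is a DNS_LABEL, including the 63-character limit.
func IsDNSLabel(s string) bool { return len(s) <= 63 && dnsLabelRE.MatchString(s) }

// IsDNSSubdomain reports whether s is a DNS_SUBDOMAIN, including the 253-character limit.
func IsDNSSubdomain(s string) bool { return len(s) <= 253 && dnsSubdomainRE.MatchString(s) }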
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta struct {
+ // Name is unique within a namespace. Name is required when creating resources, although
+ // some resources may allow a client to request the generation of an appropriate name
+ // automatically. Name is primarily intended for creation idempotence and configuration
+ // definition.
+ Name string `json:"name,omitempty"`
+
+ // GenerateName indicates that the name should be made unique by the server prior to persisting
+ // it. A non-empty value for the field indicates the name will be made unique (and the name
+ // returned to the client will be different than the name passed). The value of this field will
+ // be combined with a unique suffix on the server if the Name field has not been provided.
+ // The provided value must be valid within the rules for Name, and may be truncated by the length
+ // of the suffix required to make the value unique on the server.
+ //
+ // If this field is specified, and Name is not present, the server will NOT return a 409 if the
+ // generated name exists - instead, it will either return 201 Created or 500 with Reason
+ // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+ // should retry (optionally after the time indicated in the Retry-After header).
+ GenerateName string `json:"generateName,omitempty"`
+
+ // Namespace defines the space within which name must be unique. An empty namespace is
+ // equivalent to the "default" namespace, but "default" is the canonical representation.
+ // Not all objects are required to be scoped to a namespace - the value of this field for
+ // those objects will be empty.
+ Namespace string `json:"namespace,omitempty"`
+
+ // SelfLink is a URL representing this object.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // UID is the unique in time and space value for this object. It is typically generated by
+ // the server on successful creation of a resource and is not allowed to change on PUT
+ // operations.
+ UID types.UID `json:"uid,omitempty"`
+
+ // An opaque value that represents the version of this resource. May be used for optimistic
+ // concurrency, change detection, and the watch operation on a resource or set of resources.
+ // Clients must treat these values as opaque and values may only be valid for a particular
+ // resource or set of resources. Only servers will generate resource versions.
+ ResourceVersion string `json:"resourceVersion,omitempty"`
+
+ // A sequence number representing a specific generation of the desired state.
+ // Populated by the system. Read-only.
+ Generation int64 `json:"generation,omitempty"`
+
+ // CreationTimestamp is a timestamp representing the server time when this object was
+ // created. It is not guaranteed to be set in happens-before order across separate operations.
+ // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"`
+
+ // DeletionTimestamp is the time after which this resource will be deleted. This
+ // field is set by the server when a graceful deletion is requested by the user, and is not
+ // directly settable by a client. The resource will be deleted (no longer visible from
+ // resource lists, and not reachable by name) after the time in this field. Once set, this
+ // value may not be unset or be set further into the future, although it may be shortened
+ // or the resource may be deleted prior to this time. For example, a user may request that
+ // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
+ // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet
+ // will send a hard termination signal to the container.
+ DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty"`
+
+ // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion
+ // was requested. Represents the most recent grace period, and may only be shortened once set.
+ DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
+
+ // Labels are key value pairs that may be used to scope and select individual resources.
+ // Label keys are of the form:
+ // label-key ::= prefixed-name | name
+ // prefixed-name ::= prefix '/' name
+ // prefix ::= DNS_SUBDOMAIN
+ // name ::= DNS_LABEL
+ // The prefix is optional. If the prefix is not specified, the key is assumed to be private
+ // to the user. Other system components that wish to use labels must specify a prefix. The
+ // "kubernetes.io/" prefix is reserved for use by kubernetes components.
+ // TODO: replace map[string]string with labels.LabelSet type
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // Annotations are unstructured key value data stored with a resource that may be set by
+ // external tooling. They are not queryable and should be preserved when modifying
+ // objects. Annotation keys have the same formatting restrictions as Label keys. See the
+ // comments on Labels for details.
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // List of objects depended by this object. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"`
+
+ // Must be empty before the object is deleted from the registry. Each entry
+ // is an identifier for the responsible component that will remove the entry
+ // from the list. If the deletionTimestamp of the object is non-nil, entries
+ // in this list can only be removed.
+ Finalizers []string `json:"finalizers,omitempty"`
+}
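For illustration only (not part of the vendored file), an ObjectMeta carrying both an unprefixed and a prefixed label key in the grammar described on the Labels field; the concrete values are hypothetical:

package metaexample

import api "k8s.io/kubernetes/pkg/api"

// exampleMeta builds an ObjectMeta whose label keys follow the label-key
// grammar documented above.
func exampleMeta() api.ObjectMeta {
	return api.ObjectMeta{
		Name:      "example",
		Namespace: api.NamespaceDefault,
		Labels: map[string]string{
			"tier":                   "backend",       // name only: treated as private to the user
			"example.com/managed-by": "my-controller", // prefix (DNS_SUBDOMAIN) '/' name (DNS_LABEL)
		},
	}
}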
+
+const (
+ // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+ NamespaceDefault string = "default"
+ // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+ NamespaceAll string = ""
+ // NamespaceNone is the argument for a context when there is no namespace.
+ NamespaceNone string = ""
+ // NamespaceSystem is the system namespace where we place system components.
+ NamespaceSystem string = "kube-system"
+ // TerminationMessagePathDefault means the default path to capture the application termination message running in a container
+ TerminationMessagePathDefault string = "/dev/termination-log"
+)
+
+// Volume represents a named volume in a pod that may be accessed by any containers in the pod.
+type Volume struct {
+ // Required: This must be a DNS_LABEL. Each volume in a pod must have
+ // a unique name.
+ Name string `json:"name"`
+ // The VolumeSource represents the location and type of a volume to mount.
+ // This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
+ // This implied behavior is deprecated and will be removed in a future version.
+ VolumeSource `json:",inline,omitempty"`
+}
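A small sketch (illustrative, not part of the vendored file) of a Volume whose VolumeSource is set explicitly to an EmptyDir, rather than relying on the deprecated implied-EmptyDir behavior described above; EmptyDirVolumeSource and StorageMediumDefault are defined further down in this file:

package volexample

import api "k8s.io/kubernetes/pkg/api"

// scratchVolume returns a pod volume backed by an explicit EmptyDir.
func scratchVolume() api.Volume {
	return api.Volume{
		Name: "scratch", // must be a DNS_LABEL, unique within the pod
		VolumeSource: api.VolumeSource{
			EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumDefault},
		},
	}
}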
+
+// VolumeSource represents the source location of a volume to mount.
+// Only one of its members may be specified.
+type VolumeSource struct {
+ // HostPath represents file or directory on the host machine that is
+ // directly exposed to the container. This is generally used for system
+ // agents or other privileged things that are allowed to see the host
+ // machine. Most containers will NOT need this.
+ // ---
+ // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ // mount host directories as read/write.
+ HostPath *HostPathVolumeSource `json:"hostPath,omitempty"`
+ // EmptyDir represents a temporary directory that shares a pod's lifetime.
+ EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"`
+ // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"`
+ // AWSElasticBlockStore represents an AWS EBS disk that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"`
+ // GitRepo represents a git repository at a particular revision.
+ GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty"`
+ // Secret represents a secret that should populate this volume.
+ Secret *SecretVolumeSource `json:"secret,omitempty"`
+ // NFS represents an NFS mount on the host that shares a pod's lifetime
+ NFS *NFSVolumeSource `json:"nfs,omitempty"`
+ // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
+ // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime
+ Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"`
+ // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace
+ PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"`
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
+ RBD *RBDVolumeSource `json:"rbd,omitempty"`
+ // FlexVolume represents a generic volume resource that is
+	// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.
+ FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"`
+
+ // Cinder represents a cinder volume attached and mounted on kubelets host machine
+ Cinder *CinderVolumeSource `json:"cinder,omitempty"`
+
+ // CephFS represents a Cephfs mount on the host that shares a pod's lifetime
+ CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
+
+	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running
+ Flocker *FlockerVolumeSource `json:"flocker,omitempty"`
+
+ // DownwardAPI represents metadata about the pod that should populate this volume
+ DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"`
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ FC *FCVolumeSource `json:"fc,omitempty"`
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"`
+ // ConfigMap represents a configMap that should populate this volume
+ ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"`
+ // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"`
+}
+
+// Similar to VolumeSource but meant for the administrator who creates PVs.
+// Exactly one of its members must be set.
+type PersistentVolumeSource struct {
+ // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"`
+ // AWSElasticBlockStore represents an AWS EBS disk that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"`
+ // HostPath represents a directory on the host.
+ // Provisioned by a developer or tester.
+ // This is useful for single-node development and testing only!
+ // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+ HostPath *HostPathVolumeSource `json:"hostPath,omitempty"`
+ // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
+ Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"`
+ // NFS represents an NFS mount on the host that shares a pod's lifetime
+ NFS *NFSVolumeSource `json:"nfs,omitempty"`
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
+ RBD *RBDVolumeSource `json:"rbd,omitempty"`
+ // ISCSIVolumeSource represents an ISCSI resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
+ // FlexVolume represents a generic volume resource that is
+	// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.
+ FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"`
+ // Cinder represents a cinder volume attached and mounted on kubelets host machine
+ Cinder *CinderVolumeSource `json:"cinder,omitempty"`
+ // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ FC *FCVolumeSource `json:"fc,omitempty"`
+	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running
+ Flocker *FlockerVolumeSource `json:"flocker,omitempty"`
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"`
+ // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"`
+}
+
+type PersistentVolumeClaimVolumeSource struct {
+ // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume
+ ClaimName string `json:"claimName"`
+ // Optional: Defaults to false (read/write). ReadOnly here
+ // will force the ReadOnly setting in VolumeMounts
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+type PersistentVolume struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec defines a persistent volume owned by the cluster
+ Spec PersistentVolumeSpec `json:"spec,omitempty"`
+
+ // Status represents the current information about persistent volume.
+ Status PersistentVolumeStatus `json:"status,omitempty"`
+}
+
+type PersistentVolumeSpec struct {
+ // Resources represents the actual resources of the volume
+ Capacity ResourceList `json:"capacity"`
+ // Source represents the location and type of a volume to mount.
+ PersistentVolumeSource `json:",inline"`
+ // AccessModes contains all ways the volume can be mounted
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+ // ClaimRef is expected to be non-nil when bound.
+ // claim.VolumeName is the authoritative bind between PV and PVC.
+	// When set to a non-nil value, PVC.Spec.Selector of the referenced PVC is
+ // ignored, i.e. labels of this PV do not need to match PVC selector.
+ ClaimRef *ObjectReference `json:"claimRef,omitempty"`
+ // Optional: what happens to a persistent volume when released from its claim.
+ PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"`
+}
+
+// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
+type PersistentVolumeReclaimPolicy string
+
+const (
+ // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
+ // The volume plugin must support Recycling.
+ PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
+ // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
+ // The volume plugin must support Deletion.
+ PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
+ // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
+ // The default policy is Retain.
+ PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
+)
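Putting the spec and reclaim policies together, a sketch of a single-node test PersistentVolumeSpec backed by a HostPath; it assumes ResourceList and ResourceName (defined elsewhere in this package) map resource names to resource.Quantity values, and uses resource.MustParse from the imported resource package. Illustrative only, not part of the vendored file:

package pvexample

import (
	api "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

// hostPathPVSpec builds a 1Gi HostPath-backed volume spec that is retained
// (not recycled or deleted) when released from its claim.
func hostPathPVSpec() api.PersistentVolumeSpec {
	return api.PersistentVolumeSpec{
		Capacity: api.ResourceList{
			api.ResourceName("storage"): resource.MustParse("1Gi"),
		},
		PersistentVolumeSource: api.PersistentVolumeSource{
			HostPath: &api.HostPathVolumeSource{Path: "/tmp/pv-data"},
		},
		AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRetain,
	}
}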
+
+type PersistentVolumeStatus struct {
+ // Phase indicates if a volume is available, bound to a claim, or released by a claim
+ Phase PersistentVolumePhase `json:"phase,omitempty"`
+ // A human-readable message indicating details about why the volume is in this state.
+ Message string `json:"message,omitempty"`
+ // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI
+ Reason string `json:"reason,omitempty"`
+}
+
+type PersistentVolumeList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+ Items []PersistentVolume `json:"items"`
+}
+
+// +genclient=true
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+type PersistentVolumeClaim struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the volume requested by a pod author
+ Spec PersistentVolumeClaimSpec `json:"spec,omitempty"`
+
+ // Status represents the current information about a claim
+ Status PersistentVolumeClaimStatus `json:"status,omitempty"`
+}
+
+type PersistentVolumeClaimList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+ Items []PersistentVolumeClaim `json:"items"`
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+type PersistentVolumeClaimSpec struct {
+ // Contains the types of access modes required
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // A label query over volumes to consider for binding. This selector is
+ // ignored when VolumeName is set
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+ // Resources represents the minimum resources required
+ Resources ResourceRequirements `json:"resources,omitempty"`
+ // VolumeName is the binding reference to the PersistentVolume backing this
+	// claim. When set to a non-empty value, Selector is not evaluated
+ VolumeName string `json:"volumeName,omitempty"`
+}
+
+type PersistentVolumeClaimStatus struct {
+ // Phase represents the current phase of PersistentVolumeClaim
+ Phase PersistentVolumeClaimPhase `json:"phase,omitempty"`
+ // AccessModes contains all ways the volume backing the PVC can be mounted
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // Represents the actual resources of the underlying volume
+ Capacity ResourceList `json:"capacity,omitempty"`
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+ // can be mounted read/write mode to exactly 1 host
+ ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+ // can be mounted in read-only mode to many hosts
+ ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+ // can be mounted in read/write mode to many hosts
+ ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+ // used for PersistentVolumes that are not available
+ VolumePending PersistentVolumePhase = "Pending"
+ // used for PersistentVolumes that are not yet bound
+ // Available volumes are held by the binder and matched to PersistentVolumeClaims
+ VolumeAvailable PersistentVolumePhase = "Available"
+ // used for PersistentVolumes that are bound
+ VolumeBound PersistentVolumePhase = "Bound"
+ // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+ // released volumes must be recycled before becoming available again
+ // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+ VolumeReleased PersistentVolumePhase = "Released"
+ // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+ VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+ // used for PersistentVolumeClaims that are not yet bound
+ ClaimPending PersistentVolumeClaimPhase = "Pending"
+ // used for PersistentVolumeClaims that are bound
+ ClaimBound PersistentVolumeClaimPhase = "Bound"
+ // used for PersistentVolumeClaims that lost their underlying
+ // PersistentVolume. The claim was bound to a PersistentVolume and this
+ // volume does not exist any longer and all data on it was lost.
+ ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+ Path string `json:"path"`
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+type EmptyDirVolumeSource struct {
+ // TODO: Longer term we want to represent the selection of underlying
+ // media more like a scheduling problem - user says what traits they
+ // need, we give them a backing store that satisfies that. For now
+ // this will cover the most common needs.
+ // Optional: what type of storage medium should back this directory.
+ // The default is "" which means to use the node's default medium.
+ Medium StorageMedium `json:"medium,omitempty"`
+}
+
+// StorageMedium defines ways that storage can be allocated to a volume.
+type StorageMedium string
+
+const (
+ StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
+ StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
+)
+
+// Protocol defines network protocols supported for things like container ports.
+type Protocol string
+
+const (
+ // ProtocolTCP is the TCP protocol.
+ ProtocolTCP Protocol = "TCP"
+ // ProtocolUDP is the UDP protocol.
+ ProtocolUDP Protocol = "UDP"
+)
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+type GCEPersistentDiskVolumeSource struct {
+ // Unique name of the PD resource. Used to identify the disk in GCE
+ PDName string `json:"pdName"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Partition on the disk to mount.
+ // If omitted, kubelet will attempt to mount the device name.
+ // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+ Partition int32 `json:"partition,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+ // Required: iSCSI target portal
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ TargetPortal string `json:"targetPortal,omitempty"`
+ // Required: target iSCSI Qualified Name
+ IQN string `json:"iqn,omitempty"`
+ // Required: iSCSI target lun number
+ Lun int32 `json:"lun,omitempty"`
+ // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+ ISCSIInterface string `json:"iscsiInterface,omitempty"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+ // Required: FC target world wide names (WWNs)
+ TargetWWNs []string `json:"targetWWNs"`
+ // Required: FC target lun number
+ Lun *int32 `json:"lun"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.
+type FlexVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string `json:"driver"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ FSType string `json:"fsType,omitempty"`
+ // Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+ // Optional: Extra driver options if any.
+ Options map[string]string `json:"options,omitempty"`
+}
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+ // must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+type AWSElasticBlockStoreVolumeSource struct {
+ // Unique id of the persistent disk resource. Used to identify the disk in AWS
+ VolumeID string `json:"volumeID"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Partition on the disk to mount.
+ // If omitted, kubelet will attempt to mount the device name.
+ // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+ Partition int32 `json:"partition,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+type GitRepoVolumeSource struct {
+ // Repository URL
+ Repository string `json:"repository"`
+ // Commit hash, this is optional
+ Revision string `json:"revision,omitempty"`
+ // Clone target, this is optional
+ // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ // git repository. Otherwise, if specified, the volume will contain the git repository in
+ // the subdirectory with the given name.
+ Directory string `json:"directory,omitempty"`
+ // TODO: Consider credentials here.
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+ // Name of the secret in the pod's namespace to use.
+ SecretName string `json:"secretName,omitempty"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error. Paths must be relative and may not contain
+ // the '..' path or start with '..'.
+ Items []KeyToPath `json:"items,omitempty"`
+}
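+
+ // Illustrative example (not part of the upstream API): a SecretVolumeSource that
+ // projects a single key of an assumed secret into a chosen path. The secret name,
+ // key, and path are assumptions; KeyToPath is declared later in this file.
+var exampleSecretVolume = SecretVolumeSource{
+ SecretName: "tls-certs",
+ Items: []KeyToPath{
+  {Key: "tls.crt", Path: "certs/tls.crt"},
+ },
+}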
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+type NFSVolumeSource struct {
+ // Server is the hostname or IP address of the NFS server
+ Server string `json:"server"`
+
+ // Path is the exported NFS share
+ Path string `json:"path"`
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the NFS export to be mounted with read-only permissions
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsVolumeSource struct {
+ // Required: EndpointsName is the endpoint name that details Glusterfs topology
+ EndpointsName string `json:"endpoints"`
+
+ // Required: Path is the Glusterfs volume path
+ Path string `json:"path"`
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the Glusterfs to be mounted with read-only permissions
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDVolumeSource struct {
+ // Required: CephMonitors is a collection of Ceph monitors
+ CephMonitors []string `json:"monitors"`
+ // Required: RBDImage is the rados image name
+ RBDImage string `json:"image"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty"`
+ // Optional: RBDPool is the rados pool name, default is rbd
+ RBDPool string `json:"pool,omitempty"`
+ // Optional: RadosUser is the rados user name, default is admin
+ RadosUser string `json:"user,omitempty"`
+ // Optional: Keyring is the path to the key ring for RadosUser, default is /etc/ceph/keyring
+ Keyring string `json:"keyring,omitempty"`
+ // Optional: SecretRef is the name of the authentication secret for RadosUser, default is nil.
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+ // Represents a Cinder volume resource in OpenStack. A Cinder volume
+// must exist before mounting to a container. The volume must also be
+// in the same region as the kubelet. Cinder volumes support ownership
+// management and SELinux relabeling.
+type CinderVolumeSource struct {
+ // Unique id of the volume used to identify the cinder volume
+ VolumeID string `json:"volumeID"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string `json:"monitors"`
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ Path string `json:"path,omitempty"`
+ // Optional: User is the rados user name, default is admin
+ User string `json:"user,omitempty"`
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ SecretFile string `json:"secretFile,omitempty"`
+ // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+ // Required: the volume name. This is going to be stored as metadata -> name on the payload for Flocker
+ DatasetName string `json:"datasetName"`
+}
+
+// Represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+ // Items is a list of DownwardAPIVolume file
+ Items []DownwardAPIVolumeFile `json:"items,omitempty"`
+}
+
+// Represents a single file containing information from the downward API
+type DownwardAPIVolumeFile struct {
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ Path string `json:"path"`
+ // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
+ FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"`
+}
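+
+ // Illustrative example (not part of the upstream API): a downward API volume
+ // exposing the pod's labels as a file named "labels". The apiVersion value is an
+ // assumption.
+var exampleDownwardAPI = DownwardAPIVolumeSource{
+ Items: []DownwardAPIVolumeFile{
+  {
+   Path:     "labels",
+   FieldRef: &ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.labels"},
+  },
+ },
+}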
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+ // the name of the secret that contains the Azure Storage Account Name and Key
+ SecretName string `json:"secretName"`
+ // Share Name
+ ShareName string `json:"shareName"`
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+ // Path that identifies vSphere volume vmdk
+ VolumePath string `json:"volumePath"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string `json:"fsType,omitempty"`
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+ LocalObjectReference `json:",inline"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error. Paths must be relative and may not contain
+ // the '..' path or start with '..'.
+ Items []KeyToPath `json:"items,omitempty"`
+}
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+ // The key to project.
+ Key string `json:"key"`
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ Path string `json:"path"`
+}
+
+// ContainerPort represents a network port in a single container
+type ContainerPort struct {
+ // Optional: If specified, this must be an IANA_SVC_NAME. Each named port
+ // in a pod must have a unique name.
+ Name string `json:"name,omitempty"`
+ // Optional: If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ HostPort int32 `json:"hostPort,omitempty"`
+ // Required: This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32 `json:"containerPort"`
+ // Required: Supports "TCP" and "UDP".
+ Protocol Protocol `json:"protocol,omitempty"`
+ // Optional: What host IP to bind the external port to.
+ HostIP string `json:"hostIP,omitempty"`
+}
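+
+ // Illustrative example (not part of the upstream API): a named TCP container port.
+ // ProtocolTCP is assumed to be declared alongside ProtocolUDP above; the port name
+ // and number are assumptions.
+var exampleHTTPPort = ContainerPort{
+ Name:          "http",
+ ContainerPort: 8080,
+ Protocol:      ProtocolTCP,
+}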
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+ // Required: This must match the Name of a Volume [above].
+ Name string `json:"name"`
+ // Optional: Defaults to false (read-write).
+ ReadOnly bool `json:"readOnly,omitempty"`
+ // Required. Must not contain ':'.
+ MountPath string `json:"mountPath"`
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ SubPath string `json:"subPath,omitempty"`
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+ // Required: This must be a C_IDENTIFIER.
+ Name string `json:"name"`
+ // Optional: no more than one of the following may be specified.
+ // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. The $(VAR_NAME)
+ // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
+ // references will never be expanded, regardless of whether the variable
+ // exists or not.
+ Value string `json:"value,omitempty"`
+ // Optional: Specifies a source the value of this var should come from.
+ ValueFrom *EnvVarSource `json:"valueFrom,omitempty"`
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+// Only one of its fields may be set.
+type EnvVarSource struct {
+ // Selects a field of the pod; only name and namespace are supported.
+ FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"`
+ // Selects a key of a ConfigMap.
+ ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty"`
+ // Selects a key of a secret in the pod's namespace.
+ SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty"`
+}
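+
+ // Illustrative example (not part of the upstream API): one literal environment
+ // variable and one resolved from a Secret key via EnvVarSource. The variable,
+ // secret, and key names are assumptions; LocalObjectReference is the type declared
+ // elsewhere in this file.
+var exampleEnv = []EnvVar{
+ {Name: "LOG_LEVEL", Value: "debug"},
+ {
+  Name: "DB_PASSWORD",
+  ValueFrom: &EnvVarSource{
+   SecretKeyRef: &SecretKeySelector{
+    LocalObjectReference: LocalObjectReference{Name: "db-credentials"},
+    Key:                  "password",
+   },
+  },
+ },
+}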
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+ // Required: Version of the schema the FieldPath is written in terms of.
+ // If no value is specified, it will be defaulted to the APIVersion of the
+ // enclosing object.
+ APIVersion string `json:"apiVersion"`
+ // Required: Path of the field to select in the specified API version
+ FieldPath string `json:"fieldPath"`
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+type ResourceFieldSelector struct {
+ // Container name: required for volumes, optional for env vars
+ ContainerName string `json:"containerName,omitempty"`
+ // Required: resource to select
+ Resource string `json:"resource"`
+ // Specifies the output format of the exposed resources, defaults to "1"
+ Divisor resource.Quantity `json:"divisor,omitempty"`
+}
+
+// Selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+ // The ConfigMap to select from.
+ LocalObjectReference `json:",inline"`
+ // The key to select.
+ Key string `json:"key"`
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+ // The name of the secret in the pod's namespace to select from.
+ LocalObjectReference `json:",inline"`
+ // The key of the secret to select from. Must be a valid secret key.
+ Key string `json:"key"`
+}
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+type HTTPHeader struct {
+ // The header field name
+ Name string `json:"name"`
+ // The header field value
+ Value string `json:"value"`
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+ // Optional: Path to access on the HTTP server.
+ Path string `json:"path,omitempty"`
+ // Required: Name or number of the port to access on the container.
+ Port intstr.IntOrString `json:"port,omitempty"`
+ // Optional: Host name to connect to, defaults to the pod IP. You
+ // probably want to set "Host" in httpHeaders instead.
+ Host string `json:"host,omitempty"`
+ // Optional: Scheme to use for connecting to the host, defaults to HTTP.
+ Scheme URIScheme `json:"scheme,omitempty"`
+ // Optional: Custom headers to set in the request. HTTP allows repeated headers.
+ HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty"`
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+type URIScheme string
+
+const (
+ // URISchemeHTTP means that the scheme used will be http://
+ URISchemeHTTP URIScheme = "HTTP"
+ // URISchemeHTTPS means that the scheme used will be https://
+ URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+ // Required: Port to connect to.
+ Port intstr.IntOrString `json:"port,omitempty"`
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+ // Command is the command line to execute inside the container, the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ Command []string `json:"command,omitempty"`
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+ // The action taken to determine the health of a container
+ Handler `json:",inline"`
+ // Length of time before health checking is activated. In seconds.
+ InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"`
+ // Length of time before health checking times out. In seconds.
+ TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"`
+ // How often (in seconds) to perform the probe.
+ PeriodSeconds int32 `json:"periodSeconds,omitempty"`
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Must be 1 for liveness.
+ SuccessThreshold int32 `json:"successThreshold,omitempty"`
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ FailureThreshold int32 `json:"failureThreshold,omitempty"`
+}
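+
+ // Illustrative example (not part of the upstream API): an HTTP liveness probe using
+ // the inlined Handler declared below and the intstr package imported by this file.
+ // The path, port, and thresholds are assumptions.
+var exampleLivenessProbe = Probe{
+ Handler: Handler{
+  HTTPGet: &HTTPGetAction{
+   Path:   "/healthz",
+   Port:   intstr.FromInt(8080),
+   Scheme: URISchemeHTTP,
+  },
+ },
+ InitialDelaySeconds: 15,
+ TimeoutSeconds:      1,
+ PeriodSeconds:       10,
+ SuccessThreshold:    1,
+ FailureThreshold:    3,
+}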
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+ // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+ PullAlways PullPolicy = "Always"
+ // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
+ PullNever PullPolicy = "Never"
+ // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+ PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+ // Capability represents a POSIX capability type
+type Capability string
+
+ // Capabilities represent POSIX capabilities that can be added to or removed from a running container.
+type Capabilities struct {
+ // Added capabilities
+ Add []Capability `json:"add,omitempty"`
+ // Removed capabilities
+ Drop []Capability `json:"drop,omitempty"`
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+ // Limits describes the maximum amount of compute resources allowed.
+ Limits ResourceList `json:"limits,omitempty"`
+ // Requests describes the minimum amount of compute resources required.
+ // If Request is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value
+ Requests ResourceList `json:"requests,omitempty"`
+}
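+
+ // Illustrative example (not part of the upstream API): requests and limits for a
+ // small container, assuming the ResourceList type and the ResourceCPU and
+ // ResourceMemory names declared elsewhere in this file; the quantities are
+ // assumptions.
+var exampleResources = ResourceRequirements{
+ Requests: ResourceList{
+  ResourceCPU:    resource.MustParse("100m"),
+  ResourceMemory: resource.MustParse("128Mi"),
+ },
+ Limits: ResourceList{
+  ResourceCPU:    resource.MustParse("500m"),
+  ResourceMemory: resource.MustParse("256Mi"),
+ },
+}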
+
+// Container represents a single container that is expected to be run on the host.
+type Container struct {
+ // Required: This must be a DNS_LABEL. Each container in a pod must
+ // have a unique name.
+ Name string `json:"name"`
+ // Required.
+ Image string `json:"image"`
+ // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ Command []string `json:"command,omitempty"`
+ // Optional: The docker image's cmd is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ Args []string `json:"args,omitempty"`
+ // Optional: Defaults to Docker's default.
+ WorkingDir string `json:"workingDir,omitempty"`
+ Ports []ContainerPort `json:"ports,omitempty"`
+ Env []EnvVar `json:"env,omitempty"`
+ // Compute resource requirements.
+ Resources ResourceRequirements `json:"resources,omitempty"`
+ VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"`
+ LivenessProbe *Probe `json:"livenessProbe,omitempty"`
+ ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
+ Lifecycle *Lifecycle `json:"lifecycle,omitempty"`
+ // Required.
+ TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
+ // Required: Policy for pulling images for this container
+ ImagePullPolicy PullPolicy `json:"imagePullPolicy"`
+ // Optional: SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ SecurityContext *SecurityContext `json:"securityContext,omitempty"`
+
+ // Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+ // and shouldn't be used for general purpose containers.
+ Stdin bool `json:"stdin,omitempty"`
+ StdinOnce bool `json:"stdinOnce,omitempty"`
+ TTY bool `json:"tty,omitempty"`
+}
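+
+ // Illustrative example (not part of the upstream API): a minimal Container wiring
+ // together a port, an environment variable, a volume mount, and an image pull
+ // policy. The image, names, and paths are assumptions.
+var exampleContainer = Container{
+ Name:  "web",
+ Image: "nginx:1.10",
+ Ports: []ContainerPort{{Name: "http", ContainerPort: 80}},
+ Env:   []EnvVar{{Name: "LOG_LEVEL", Value: "info"}},
+ VolumeMounts: []VolumeMount{
+  {Name: "content", MountPath: "/usr/share/nginx/html", ReadOnly: true},
+ },
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy:        PullIfNotPresent,
+}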
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+ // One and only one of the following should be specified.
+ // Exec specifies the action to take.
+ Exec *ExecAction `json:"exec,omitempty"`
+ // HTTPGet specifies the http request to perform.
+ HTTPGet *HTTPGetAction `json:"httpGet,omitempty"`
+ // TCPSocket specifies an action involving a TCP port.
+ // TODO: implement a realistic TCP lifecycle hook
+ TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty"`
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+ // PostStart is called immediately after a container is created. If the handler fails, the container
+ // is terminated and restarted.
+ PostStart *Handler `json:"postStart,omitempty"`
+ // PreStop is called immediately before a container is terminated. The reason for termination is
+ // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated.
+ PreStop *Handler `json:"preStop,omitempty"`
+}
+
+// The below types are used by kube_client and api_server.
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+type ContainerStateWaiting struct {
+ // A brief CamelCase string indicating details about why the container is in waiting state.
+ Reason string `json:"reason,omitempty"`
+ // A human-readable message indicating details about why the container is in waiting state.
+ Message string `json:"message,omitempty"`
+}
+
+type ContainerStateRunning struct {
+ StartedAt unversioned.Time `json:"startedAt,omitempty"`
+}
+
+type ContainerStateTerminated struct {
+ ExitCode int32 `json:"exitCode"`
+ Signal int32 `json:"signal,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+ StartedAt unversioned.Time `json:"startedAt,omitempty"`
+ FinishedAt unversioned.Time `json:"finishedAt,omitempty"`
+ ContainerID string `json:"containerID,omitempty"`
+}
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+ Waiting *ContainerStateWaiting `json:"waiting,omitempty"`
+ Running *ContainerStateRunning `json:"running,omitempty"`
+ Terminated *ContainerStateTerminated `json:"terminated,omitempty"`
+}
+
+type ContainerStatus struct {
+ // Each container in a pod must have a unique name.
+ Name string `json:"name"`
+ State ContainerState `json:"state,omitempty"`
+ LastTerminationState ContainerState `json:"lastState,omitempty"`
+ // Ready specifies whether the container has passed its readiness check.
+ Ready bool `json:"ready"`
+ // Note that this is calculated from dead containers. But those containers are subject to
+ // garbage collection. This value will get capped at 5 by GC.
+ RestartCount int32 `json:"restartCount"`
+ Image string `json:"image"`
+ ImageID string `json:"imageID"`
+ ContainerID string `json:"containerID,omitempty"`
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+ // PodPending means the pod has been accepted by the system, but one or more of the containers
+ // has not been started. This includes time before being bound to a node, as well as time spent
+ // pulling images onto the host.
+ PodPending PodPhase = "Pending"
+ // PodRunning means the pod has been bound to a node and all of the containers have been started.
+ // At least one container is still running or is in the process of being restarted.
+ PodRunning PodPhase = "Running"
+ // PodSucceeded means that all containers in the pod have voluntarily terminated
+ // with a container exit code of 0, and the system is not going to restart any of these containers.
+ PodSucceeded PodPhase = "Succeeded"
+ // PodFailed means that all containers in the pod have terminated, and at least one container has
+ // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+ PodFailed PodPhase = "Failed"
+ // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+ // to an error in communicating with the host of the pod.
+ PodUnknown PodPhase = "Unknown"
+)
+
+type PodConditionType string
+
+// These are valid conditions of pod.
+const (
+ // PodScheduled represents status of the scheduling process for this pod.
+ PodScheduled PodConditionType = "PodScheduled"
+ // PodReady means the pod is able to service requests and should be added to the
+ // load balancing pools of all matching services.
+ PodReady PodConditionType = "Ready"
+ // PodInitialized means that all init containers in the pod have started successfully.
+ PodInitialized PodConditionType = "Initialized"
+)
+
+type PodCondition struct {
+ Type PodConditionType `json:"type"`
+ Status ConditionStatus `json:"status"`
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"`
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+ RestartPolicyAlways RestartPolicy = "Always"
+ RestartPolicyOnFailure RestartPolicy = "OnFailure"
+ RestartPolicyNever RestartPolicy = "Never"
+)
+
+// PodList is a list of Pods.
+type PodList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Pod `json:"items"`
+}
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first, if it is available, then fall back on the default (as
+ // determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects.
+type NodeSelectorTerm struct {
+ // Required. A list of node selector requirements. The requirements are ANDed.
+ MatchExpressions []NodeSelectorRequirement `json:"matchExpressions"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator `json:"operator"`
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ Values []string `json:"values,omitempty"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+ NodeSelectorOpIn NodeSelectorOperator = "In"
+ NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
+ NodeSelectorOpExists NodeSelectorOperator = "Exists"
+ NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+ NodeSelectorOpGt NodeSelectorOperator = "Gt"
+ NodeSelectorOpLt NodeSelectorOperator = "Lt"
+)
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+ // Describes node affinity scheduling rules for the pod.
+ NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"`
+ // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ PodAffinity *PodAffinity `json:"podAffinity,omitempty"`
+ // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"`
+}
+
+// Pod affinity is a group of inter pod affinity scheduling rules.
+type PodAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+type PodAntiAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+type WeightedPodAffinityTerm struct {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight int `json:"weight"`
+ // Required. A pod affinity term, associated with the corresponding weight.
+ PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+ // A label query over a set of resources, in this case pods.
+ LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty"`
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // nil list means "this pod's namespace," empty list means "all namespaces"
+ // The json tag here is not "omitempty" since we need to distinguish nil and empty.
+ // See https://golang.org/pkg/encoding/json/#Marshal for more details.
+ Namespaces []string `json:"namespaces"`
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
+ // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
+ // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
+ TopologyKey string `json:"topologyKey,omitempty"`
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // will try to eventually evict the pod from its node.
+ // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // may or may not try to eventually evict the pod from its node.
+ RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node matches the corresponding matchExpressions; the
+ // node(s) with the highest sum are the most preferred.
+ PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+ // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ Weight int32 `json:"weight"`
+ // A node selector term, associated with the corresponding weight.
+ Preference NodeSelectorTerm `json:"preference"`
+}
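+
+ // Illustrative example (not part of the upstream API): node affinity that requires
+ // nodes carrying an assumed "disktype" label and prefers nodes that also expose an
+ // assumed "zone" label. Label keys, values, and the weight are assumptions.
+var exampleAffinity = Affinity{
+ NodeAffinity: &NodeAffinity{
+  RequiredDuringSchedulingIgnoredDuringExecution: &NodeSelector{
+   NodeSelectorTerms: []NodeSelectorTerm{{
+    MatchExpressions: []NodeSelectorRequirement{{
+     Key:      "disktype",
+     Operator: NodeSelectorOpIn,
+     Values:   []string{"ssd"},
+    }},
+   }},
+  },
+  PreferredDuringSchedulingIgnoredDuringExecution: []PreferredSchedulingTerm{{
+   Weight: 10,
+   Preference: NodeSelectorTerm{
+    MatchExpressions: []NodeSelectorRequirement{{
+     Key:      "zone",
+     Operator: NodeSelectorOpExists,
+    }},
+   },
+  }},
+ },
+}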
+
+// The node this Taint is attached to has the effect "effect" on
+ // any pod that does not tolerate the Taint.
+type Taint struct {
+ // Required. The taint key to be applied to a node.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
+ // Required. The taint value corresponding to the taint key.
+ Value string `json:"value,omitempty"`
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule and PreferNoSchedule.
+ Effect TaintEffect `json:"effect"`
+}
+
+type TaintEffect string
+
+const (
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // but allow all pods submitted to Kubelet without going through the scheduler
+ // to start, and allow all already-running pods to continue running.
+ // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule"
+ // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+ // new pods onto the node, rather than prohibiting new pods from scheduling
+ // onto the node entirely. Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // do not allow pods to start on Kubelet unless they tolerate the taint,
+ // but allow all already-running pods to continue running.
+ // Enforced by the scheduler and Kubelet.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // do not allow pods to start on Kubelet unless they tolerate the taint,
+ // and evict any already-running pods that do not tolerate the taint.
+ // Enforced by the scheduler and Kubelet.
+ // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+ // Required. Key is the taint key that the toleration applies to.
+ Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key"`
+ // operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ Operator TolerationOperator `json:"operator,omitempty"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ Value string `json:"value,omitempty"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and PreferNoSchedule.
+ Effect TaintEffect `json:"effect,omitempty"`
+ // TODO: For forgiveness (#1574), we'd eventually add at least a grace period
+ // here, and possibly an occurrence threshold and period.
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+ TolerationOpExists TolerationOperator = "Exists"
+ TolerationOpEqual TolerationOperator = "Equal"
+)
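+
+ // Illustrative example (not part of the upstream API): a toleration matching every
+ // taint with an assumed key, regardless of its value, for the NoSchedule effect.
+var exampleToleration = Toleration{
+ Key:      "dedicated",
+ Operator: TolerationOpExists,
+ Effect:   TaintEffectNoSchedule,
+}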
+
+// PodSpec is a description of a pod
+type PodSpec struct {
+ Volumes []Volume `json:"volumes"`
+ // List of initialization containers belonging to the pod.
+ InitContainers []Container `json:"-"`
+ // List of containers belonging to the pod.
+ Containers []Container `json:"containers"`
+ RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ // Value must be non-negative integer. The value zero indicates delete immediately.
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds between the time the processes running in the pod are sent
+ // a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+ // Optional duration in seconds relative to the StartTime that the pod may be active on a node
+ // before the system actively tries to terminate the pod; value must be positive integer
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+ // Required: Set DNS policy.
+ DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty"`
+ // NodeSelector is a selector which must be true for the pod to fit on a node
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ ServiceAccountName string `json:"serviceAccountName"`
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ NodeName string `json:"nodeName,omitempty"`
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ SecurityContext *PodSecurityContext `json:"securityContext,omitempty"`
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+ // in the case of docker, only DockerConfig type secrets are honored.
+ ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"`
+ // Specifies the hostname of the Pod.
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ Hostname string `json:"hostname,omitempty"`
+ // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domain name at all.
+ Subdomain string `json:"subdomain,omitempty"`
+}
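+
+ // Illustrative example (not part of the upstream API): a minimal PodSpec with one
+ // container, the default restart and DNS policies, and an assumed service account.
+var examplePodSpec = PodSpec{
+ Containers: []Container{{
+  Name:            "web",
+  Image:           "nginx:1.10",
+  ImagePullPolicy: PullIfNotPresent,
+ }},
+ RestartPolicy:      RestartPolicyAlways,
+ DNSPolicy:          DNSClusterFirst,
+ ServiceAccountName: "default",
+}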
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+ // Use the host's network namespace. If this option is set, the ports that will be
+ // used must be specified.
+ // Optional: Default to false
+ // +k8s:conversion-gen=false
+ HostNetwork bool `json:"hostNetwork,omitempty"`
+ // Use the host's pid namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ HostPID bool `json:"hostPID,omitempty"`
+ // Use the host's ipc namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ HostIPC bool `json:"hostIPC,omitempty"`
+ // The SELinux context to be applied to all containers.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in SecurityContext. If set in
+ // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ // takes precedence for that container.
+ SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ RunAsUser *int64 `json:"runAsUser,omitempty"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
+ // A list of groups applied to the first process run in each container, in addition
+ // to the container's primary GID. If unspecified, no groups will be added to
+ // any container.
+ SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
+ // A special supplemental group that applies to all containers in a pod.
+ // Some volume types allow the Kubelet to change the ownership of that volume
+ // to be owned by the pod:
+ //
+ // 1. The owning GID will be the FSGroup
+ // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ // 3. The permission bits are OR'd with rw-rw----
+ //
+ // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ FSGroup *int64 `json:"fsGroup,omitempty"`
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+type PodStatus struct {
+ Phase PodPhase `json:"phase,omitempty"`
+ Conditions []PodCondition `json:"conditions,omitempty"`
+ // A human readable message indicating details about why the pod is in this state.
+ Message string `json:"message,omitempty"`
+ // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+ Reason string `json:"reason,omitempty"`
+
+ HostIP string `json:"hostIP,omitempty"`
+ PodIP string `json:"podIP,omitempty"`
+
+ // Date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ StartTime *unversioned.Time `json:"startTime,omitempty"`
+
+ // The list has one entry per init container in the manifest. The most recent successful
+ // init container will have ready = true, the most recently started container will have
+ // startTime set.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+ InitContainerStatuses []ContainerStatus `json:"-"`
+ // The list has one entry per container in the manifest. Each entry is
+ // currently the output of `docker inspect`. This output format is *not*
+ // final and should not be relied upon.
+ // TODO: Make real decisions about what our info should look like. Re-enable fuzz test
+ // when we have done this.
+ ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty"`
+}
+
+ // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+type PodStatusResult struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ Status PodStatus `json:"status,omitempty"`
+}
+
+// +genclient=true
+
+// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
+type Pod struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the behavior of a pod.
+ Spec PodSpec `json:"spec,omitempty"`
+
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ Status PodStatus `json:"status,omitempty"`
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+ // Metadata of the pods created from this template.
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the behavior of a pod.
+ Spec PodSpec `json:"spec,omitempty"`
+}
+
+// +genclient=true
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Template defines the pods that will be created from this pod template
+ Template PodTemplateSpec `json:"template,omitempty"`
+}
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []PodTemplate `json:"items"`
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+// As the internal representation of a replication controller, it may have either
+// a TemplateRef or a Template set.
+type ReplicationControllerSpec struct {
+ // Replicas is the number of desired replicas.
+ Replicas int32 `json:"replicas"`
+
+ // Selector is a label query over pods that should match the Replicas count.
+ Selector map[string]string `json:"selector"`
+
+ // TemplateRef is a reference to an object that describes the pod that will be created if
+ // insufficient replicas are detected. This reference is ignored if a Template is set.
+ // Must be set before converting to a versioned API object
+ //TemplateRef *ObjectReference `json:"templateRef,omitempty"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Internally, this takes precedence over a
+ // TemplateRef.
+ Template *PodTemplateSpec `json:"template,omitempty"`
+}
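+
+ // Illustrative example (not part of the upstream API): a ReplicationControllerSpec
+ // that keeps three replicas of a pod template whose labels match the selector,
+ // assuming the ObjectMeta type declared elsewhere in this file. Labels, image, and
+ // replica count are assumptions.
+var exampleRCSpec = ReplicationControllerSpec{
+ Replicas: 3,
+ Selector: map[string]string{"app": "web"},
+ Template: &PodTemplateSpec{
+  ObjectMeta: ObjectMeta{Labels: map[string]string{"app": "web"}},
+  Spec: PodSpec{
+   Containers: []Container{{
+    Name:            "web",
+    Image:           "nginx:1.10",
+    ImagePullPolicy: PullIfNotPresent,
+   }},
+   RestartPolicy: RestartPolicyAlways,
+   DNSPolicy:     DNSClusterFirst,
+  },
+ },
+}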
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+ // Replicas is the number of actual replicas.
+ Replicas int32 `json:"replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
+
+ // ObservedGeneration is the most recent generation observed by the controller.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+}
+
+// +genclient=true
+
+// ReplicationController represents the configuration of a replication controller.
+type ReplicationController struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired behavior of this replication controller.
+ Spec ReplicationControllerSpec `json:"spec,omitempty"`
+
+ // Status is the current status of this replication controller. This data may be
+ // out of date by some window of time.
+ Status ReplicationControllerStatus `json:"status,omitempty"`
+}
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []ReplicationController `json:"items"`
+}
+
+const (
+ // ClusterIPNone - do not assign a cluster IP
+ // no proxying required and no environment variables should be created for pods
+ ClusterIPNone = "None"
+)
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Service `json:"items"`
+}
+
+// Session Affinity Type string
+type ServiceAffinity string
+
+const (
+ // ServiceAffinityClientIP is the Client IP based.
+ ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+ // ServiceAffinityNone - no session affinity.
+ ServiceAffinityNone ServiceAffinity = "None"
+)
+
+// Service Type string describes ingress methods for a service
+type ServiceType string
+
+const (
+ // ServiceTypeClusterIP means a service will only be accessible inside the
+ // cluster, via the ClusterIP.
+ ServiceTypeClusterIP ServiceType = "ClusterIP"
+
+ // ServiceTypeNodePort means a service will be exposed on one port of
+ // every node, in addition to 'ClusterIP' type.
+ ServiceTypeNodePort ServiceType = "NodePort"
+
+ // ServiceTypeLoadBalancer means a service will be exposed via an
+ // external load balancer (if the cloud provider supports it), in addition
+ // to 'NodePort' type.
+ ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
+)
+
+// ServiceStatus represents the current status of a service
+type ServiceStatus struct {
+ // LoadBalancer contains the current status of the load-balancer,
+ // if one is present.
+ LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
+}
+
+// LoadBalancerStatus represents the status of a load-balancer
+type LoadBalancerStatus struct {
+ // Ingress is a list containing ingress points for the load-balancer;
+ // traffic intended for the service should be sent to these ingress points.
+ Ingress []LoadBalancerIngress `json:"ingress,omitempty"`
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+type LoadBalancerIngress struct {
+ // IP is set for load-balancer ingress points that are IP based
+ // (typically GCE or OpenStack load-balancers)
+ IP string `json:"ip,omitempty"`
+
+ // Hostname is set for load-balancer ingress points that are DNS based
+ // (typically AWS load-balancers)
+ Hostname string `json:"hostname,omitempty"`
+}
+
+// ServiceSpec describes the attributes that a user creates on a service
+type ServiceSpec struct {
+ // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
+ Type ServiceType `json:"type,omitempty"`
+
+ // Required: The list of ports that are exposed by this service.
+ Ports []ServicePort `json:"ports"`
+
+ // This service will route traffic to pods having labels matching this selector. If empty or not present,
+ // the service is assumed to have endpoints set by an external process and Kubernetes will not modify
+ // those endpoints.
+ Selector map[string]string `json:"selector"`
+
+ // ClusterIP is usually assigned by the master. If specified by the user
+ // we will try to respect it or else fail the request. This field cannot
+ // be changed by updates.
+ // Valid values are "None", the empty string (""), or a valid IP address.
+ // "None" can be specified for headless services when proxying is not required.
+ ClusterIP string `json:"clusterIP,omitempty"`
+
+ // ExternalIPs are used by external load balancers, or can be set by
+ // users to handle external traffic that arrives at a node.
+ ExternalIPs []string `json:"externalIPs,omitempty"`
+
+ // Only applies to Service Type: LoadBalancer
+ // LoadBalancer will get created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
+
+ // Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
+ SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
+
+ // Optional: If specified and supported by the platform, traffic through the cloud-provider
+ // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ // cloud-provider does not support the feature.
+ LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
+}
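As a hedged illustration of the ClusterIP comment above, a headless service spec might look like the following (values are made up; ProtocolTCP and the intstr helpers come from elsewhere in this vendored tree):

// Headless service: no cluster IP is allocated and no proxying is done;
// endpoints are still tracked for pods matching the selector.
headless := ServiceSpec{
	ClusterIP: ClusterIPNone,
	Selector:  map[string]string{"app": "db"},
	Ports: []ServicePort{
		{Name: "peer", Protocol: ProtocolTCP, Port: 5432, TargetPort: intstr.FromInt(5432)},
	},
}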
+
+type ServicePort struct {
+ // Optional if only one ServicePort is defined on this service: The
+ // name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. This maps to
+ // the 'Name' field in EndpointPort objects.
+ Name string `json:"name"`
+
+ // The IP protocol for this port. Supports "TCP" and "UDP".
+ Protocol Protocol `json:"protocol"`
+
+ // The port that will be exposed on the service.
+ Port int32 `json:"port"`
+
+ // Optional: The target port on pods selected by this service. If this
+ // is a string, it will be looked up as a named port in the target
+ // Pod's container ports. If this is not specified, the value
+ // of the 'port' field is used (an identity map).
+ // This field is ignored for services with clusterIP=None, and should be
+ // omitted or set equal to the 'port' field.
+ TargetPort intstr.IntOrString `json:"targetPort"`
+
+ // The port on each node on which this service is exposed.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ NodePort int32 `json:"nodePort"`
+}
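The TargetPort comment above distinguishes numeric and named targets; a small sketch, again assuming the vendored intstr helpers:

// Expose service port 80 and forward to whichever container port the target
// pod has named "http" (looked up in the pod's container ports).
byName := ServicePort{Name: "web", Protocol: ProtocolTCP, Port: 80, TargetPort: intstr.FromString("http")}

// Identity mapping: service port and target port are both 8080.
byNumber := ServicePort{Name: "alt", Protocol: ProtocolTCP, Port: 8080, TargetPort: intstr.FromInt(8080)}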
+
+// +genclient=true
+
+// Service is a named abstraction of a software service (for example, mysql), consisting of a local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+type Service struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the behavior of a service.
+ Spec ServiceSpec `json:"spec,omitempty"`
+
+ // Status represents the current status of a service.
+ Status ServiceStatus `json:"status,omitempty"`
+}
+
+// +genclient=true
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount
+ Secrets []ObjectReference `json:"secrets"`
+
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"`
+}
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []ServiceAccount `json:"items"`
+}
+
+// +genclient=true
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+// Name: "mysvc",
+// Subsets: [
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// },
+// {
+// Addresses: [{"ip": "10.10.3.3"}],
+// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+// },
+// ]
+type Endpoints struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // The set of all endpoints is the union of all subsets.
+ Subsets []EndpointSubset
+}
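A direct translation of the comment's example into Go (purely illustrative):

eps := Endpoints{
	ObjectMeta: ObjectMeta{Name: "mysvc"},
	Subsets: []EndpointSubset{
		{
			Addresses: []EndpointAddress{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}},
			Ports:     []EndpointPort{{Name: "a", Port: 8675}, {Name: "b", Port: 309}},
		},
		{
			Addresses: []EndpointAddress{{IP: "10.10.3.3"}},
			Ports:     []EndpointPort{{Name: "a", Port: 93}, {Name: "b", Port: 76}},
		},
	},
}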
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// }
+// The resulting set of endpoints can be viewed as:
+// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+ Addresses []EndpointAddress
+ NotReadyAddresses []EndpointAddress
+ Ports []EndpointPort
+}
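A sketch of the Cartesian expansion described above; expand is a hypothetical helper and assumes fmt is imported:

// expand lists every "ip:port" pair implied by one subset.
func expand(s EndpointSubset) []string {
	var pairs []string
	for _, p := range s.Ports {
		for _, a := range s.Addresses {
			pairs = append(pairs, fmt.Sprintf("%s:%d", a.IP, p.Port))
		}
	}
	return pairs
}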
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+ // The IP of this endpoint.
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, see #4447.
+ IP string
+ // Optional: Hostname of this endpoint
+ // Meant to be used by DNS servers etc.
+ Hostname string `json:"hostname,omitempty"`
+ // Optional: The kubernetes object related to the entry point.
+ TargetRef *ObjectReference
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+ // The name of this port (corresponds to ServicePort.Name). Optional
+ // if only one port is defined. Must be a DNS_LABEL.
+ Name string
+
+ // The port number.
+ Port int32
+
+ // The IP protocol for this port.
+ Protocol Protocol
+}
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Endpoints `json:"items"`
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+ // PodCIDR represents the pod IP range assigned to the node
+ // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
+ PodCIDR string `json:"podCIDR,omitempty"`
+
+ // External ID of the node assigned by some machine database (e.g. a cloud provider)
+ ExternalID string `json:"externalID,omitempty"`
+
+ // ID of the node assigned by the cloud provider
+ // Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
+ ProviderID string `json:"providerID,omitempty"`
+
+ // Unschedulable controls node schedulability of new pods. By default, the node is schedulable.
+ Unschedulable bool `json:"unschedulable,omitempty"`
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+type DaemonEndpoint struct {
+ /*
+ The port tag was not properly in quotes in earlier releases, so it must be
+ uppercased for backwards compat (since it was falling back to var name of
+ 'Port').
+ */
+
+ // Port number of the given endpoint.
+ Port int32 `json:"Port"`
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+type NodeDaemonEndpoints struct {
+ // Endpoint on which Kubelet is listening.
+ KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty"`
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+type NodeSystemInfo struct {
+ // Machine ID reported by the node.
+ MachineID string `json:"machineID"`
+ // System UUID reported by the node.
+ SystemUUID string `json:"systemUUID"`
+ // Boot ID reported by the node.
+ BootID string `json:"bootID"`
+ // Kernel Version reported by the node.
+ KernelVersion string `json:"kernelVersion"`
+ // OS Image reported by the node.
+ OSImage string `json:"osImage"`
+ // ContainerRuntime Version reported by the node.
+ ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
+ // Kubelet Version reported by the node.
+ KubeletVersion string `json:"kubeletVersion"`
+ // KubeProxy Version reported by the node.
+ KubeProxyVersion string `json:"kubeProxyVersion"`
+ // The Operating System reported by the node
+ OperatingSystem string `json:"operatingSystem"`
+ // The Architecture reported by the node
+ Architecture string `json:"architecture"`
+}
+
+// NodeStatus is information about the current status of a node.
+type NodeStatus struct {
+ // Capacity represents the total resources of a node.
+ Capacity ResourceList `json:"capacity,omitempty"`
+ // Allocatable represents the resources of a node that are available for scheduling.
+ Allocatable ResourceList `json:"allocatable,omitempty"`
+ // NodePhase is the current lifecycle phase of the node.
+ Phase NodePhase `json:"phase,omitempty"`
+ // Conditions is an array of current node conditions.
+ Conditions []NodeCondition `json:"conditions,omitempty"`
+ // Queried from cloud provider, if available.
+ Addresses []NodeAddress `json:"addresses,omitempty"`
+ // Endpoints of daemons running on the Node.
+ DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty"`
+ // Set of ids/uuids to uniquely identify the node.
+ NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
+ // List of container images on this node
+ Images []ContainerImage `json:"images,omitempty"`
+ // List of attachable volumes in use (mounted) by the node.
+ VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty"`
+ // List of volumes that are attached to the node.
+ VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty"`
+}
+
+type UniqueVolumeName string
+
+// AttachedVolume describes a volume attached to a node
+type AttachedVolume struct {
+ // Name of the attached volume
+ Name UniqueVolumeName `json:"name"`
+
+ // DevicePath represents the device path where the volume should be available
+ DevicePath string `json:"devicePath"`
+}
+
+// Describe a container image
+type ContainerImage struct {
+ // Names by which this image is known.
+ Names []string `json:"names"`
+ // The size of the image in bytes.
+ SizeBytes int64 `json:"sizeBytes,omitempty"`
+}
+
+type NodePhase string
+
+// These are the valid phases of a node.
+const (
+ // NodePending means the node has been created/added by the system, but not configured.
+ NodePending NodePhase = "Pending"
+ // NodeRunning means the node has been configured and has Kubernetes components running.
+ NodeRunning NodePhase = "Running"
+ // NodeTerminated means the node has been removed from the cluster.
+ NodeTerminated NodePhase = "Terminated"
+)
+
+type NodeConditionType string
+
+// These are valid conditions of a node. Currently, we don't have enough information to decide
+// node conditions. In the future we will add more. The proposed set of conditions is:
+// NodeReady, NodeReachable
+const (
+ // NodeReady means kubelet is healthy and ready to accept pods.
+ NodeReady NodeConditionType = "Ready"
+ // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
+ // space on the node.
+ NodeOutOfDisk NodeConditionType = "OutOfDisk"
+ // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
+ NodeMemoryPressure NodeConditionType = "MemoryPressure"
+ // NodeNetworkUnavailable means that network for the node is not correctly configured.
+ NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+)
+
+type NodeCondition struct {
+ Type NodeConditionType `json:"type"`
+ Status ConditionStatus `json:"status"`
+ LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty"`
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+type NodeAddressType string
+
+// These are valid address types of a node. NodeLegacyHostIP is used to transition
+// from the outdated HostIP field to NodeAddress.
+const (
+ NodeLegacyHostIP NodeAddressType = "LegacyHostIP"
+ NodeHostName NodeAddressType = "Hostname"
+ NodeExternalIP NodeAddressType = "ExternalIP"
+ NodeInternalIP NodeAddressType = "InternalIP"
+)
+
+type NodeAddress struct {
+ Type NodeAddressType `json:"type"`
+ Address string `json:"address"`
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+type NodeResources struct {
+ // Capacity represents the available resources of a node
+ Capacity ResourceList `json:"capacity,omitempty"`
+}
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// Resource names must be no more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+ // CPU, in cores. (500m = .5 cores)
+ ResourceCPU ResourceName = "cpu"
+ // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceMemory ResourceName = "memory"
+ // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+ ResourceStorage ResourceName = "storage"
+ // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
+ ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
+ // Number of Pods that may be running on this Node: see ResourcePods
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
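For example, a node capacity could be expressed as the following ResourceList (a sketch using resource.MustParse from the vendored resource package; the quantities are arbitrary):

capacity := ResourceList{
	ResourceCPU:    resource.MustParse("4"),   // 4 cores
	ResourceMemory: resource.MustParse("8Gi"), // 8 GiB
	ResourcePods:   resource.MustParse("110"), // max pods schedulable on the node
}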
+
+// +genclient=true
+// +nonNamespaced=true
+
+// Node is a worker node in Kubernetes
+// The name of the node according to etcd is in ObjectMeta.Name.
+type Node struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the behavior of a node.
+ Spec NodeSpec `json:"spec,omitempty"`
+
+ // Status describes the current status of a Node
+ Status NodeStatus `json:"status,omitempty"`
+}
+
+// NodeList is a list of nodes.
+type NodeList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Node `json:"items"`
+}
+
+// NamespaceSpec describes the attributes on a Namespace
+type NamespaceSpec struct {
+ // Finalizers is an opaque list of values that must be empty to permanently remove the object from storage
+ Finalizers []FinalizerName
+}
+
+type FinalizerName string
+
+// These are internal finalizer values to Kubernetes; they must be qualified names unless defined here
+const (
+ FinalizerKubernetes FinalizerName = "kubernetes"
+ FinalizerOrphan string = "orphan"
+)
+
+// NamespaceStatus is information about the current status of a Namespace.
+type NamespaceStatus struct {
+ // Phase is the current lifecycle phase of the namespace.
+ Phase NamespacePhase `json:"phase,omitempty"`
+}
+
+type NamespacePhase string
+
+// These are the valid phases of a namespace.
+const (
+ // NamespaceActive means the namespace is available for use in the system
+ NamespaceActive NamespacePhase = "Active"
+ // NamespaceTerminating means the namespace is undergoing graceful termination
+ NamespaceTerminating NamespacePhase = "Terminating"
+)
+
+// +genclient=true
+// +nonNamespaced=true
+
+// A namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+type Namespace struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the behavior of the Namespace.
+ Spec NamespaceSpec `json:"spec,omitempty"`
+
+ // Status describes the current status of a Namespace
+ Status NamespaceStatus `json:"status,omitempty"`
+}
+
+// NamespaceList is a list of Namespaces.
+type NamespaceList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Namespace `json:"items"`
+}
+
+// Binding ties one object to another - for example, a pod is bound to a node by a scheduler.
+type Binding struct {
+ unversioned.TypeMeta `json:",inline"`
+ // ObjectMeta describes the object that is being bound.
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Target is the object to bind to.
+ Target ObjectReference `json:"target"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+ // Specifies the target UID.
+ UID *types.UID `json:"uid,omitempty"`
+}
+
+// DeleteOptions may be provided when deleting an API object
+type DeleteOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Optional duration in seconds before the object should be deleted. The value must be a non-negative integer.
+ // The value zero indicates delete immediately. If this value is nil, the default grace period for the
+ // specified type will be used.
+ GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
+
+ // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+ // returned.
+ Preconditions *Preconditions `json:"preconditions,omitempty"`
+
+ // Should the dependent objects be orphaned. If true/false, the "orphan"
+ // finalizer will be added to/removed from the object's finalizers list.
+ OrphanDependents *bool `json:"orphanDependents,omitempty"`
+}
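A hedged sketch of how the grace period and preconditions combine (the UID value is made up):

grace := int64(30)
uid := types.UID("11111111-2222-3333-4444-555555555555")
opts := DeleteOptions{
	GracePeriodSeconds: &grace,                    // wait up to 30s before killing
	Preconditions:      &Preconditions{UID: &uid}, // 409 Conflict if the UID has changed
}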
+
+// ExportOptions is the query options to the standard REST get call.
+type ExportOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Should this value be exported. Export strips fields that a user cannot specify.
+ Export bool `json:"export"`
+ // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
+ Exact bool `json:"exact"`
+}
+
+// ListOptions is the query options to a standard REST list call, and has future support for
+// watch calls.
+type ListOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // A selector based on labels
+ LabelSelector labels.Selector
+ // A selector based on fields
+ FieldSelector fields.Selector
+ // If true, watch for changes to this list
+ Watch bool
+ // The resource version to watch (no effect on list yet)
+ ResourceVersion string
+ // Timeout for the list/watch call.
+ TimeoutSeconds *int64
+}
+
+// PodLogOptions is the query options for a Pod's logs REST call
+type PodLogOptions struct {
+ unversioned.TypeMeta
+
+ // Container for which to return logs
+ Container string
+ // If true, follow the logs for the pod
+ Follow bool
+ // If true, return previous terminated container logs
+ Previous bool
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceSeconds *int64
+ // An RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceTime *unversioned.Time
+ // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output.
+ Timestamps bool
+ // If set, the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ TailLines *int64
+ // If set, the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ LimitBytes *int64
+}
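Illustrative use of the mutually exclusive since* fields and TailLines (the container name is hypothetical):

tail := int64(100)
since := int64(600)
logOpts := PodLogOptions{
	Container:    "app",
	Follow:       true,
	TailLines:    &tail,  // last 100 lines...
	SinceSeconds: &since, // ...from the past 10 minutes (do not also set SinceTime)
}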
+
+// PodAttachOptions is the query options to a Pod's remote attach call
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+type PodAttachOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Stdin if true indicates that stdin is to be redirected for the attach call
+ Stdin bool `json:"stdin,omitempty"`
+
+ // Stdout if true indicates that stdout is to be redirected for the attach call
+ Stdout bool `json:"stdout,omitempty"`
+
+ // Stderr if true indicates that stderr is to be redirected for the attach call
+ Stderr bool `json:"stderr,omitempty"`
+
+ // TTY if true indicates that a tty will be allocated for the attach call
+ TTY bool `json:"tty,omitempty"`
+
+ // Container to attach to.
+ Container string `json:"container,omitempty"`
+}
+
+// PodExecOptions is the query options to a Pod's remote exec call
+type PodExecOptions struct {
+ unversioned.TypeMeta
+
+ // Stdin if true indicates that stdin is to be redirected for the exec call
+ Stdin bool
+
+ // Stdout if true indicates that stdout is to be redirected for the exec call
+ Stdout bool
+
+ // Stderr if true indicates that stderr is to be redirected for the exec call
+ Stderr bool
+
+ // TTY if true indicates that a tty will be allocated for the exec call
+ TTY bool
+
+ // Container in which to execute the command.
+ Container string
+
+ // Command is the remote command to execute; argv array; not executed within a shell.
+ Command []string
+}
+
+// PodProxyOptions is the query options to a Pod's proxy call
+type PodProxyOptions struct {
+ unversioned.TypeMeta
+
+ // Path is the URL path to use for the current proxy request
+ Path string
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call
+type NodeProxyOptions struct {
+ unversioned.TypeMeta
+
+ // Path is the URL path to use for the current proxy request
+ Path string
+}
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+type ServiceProxyOptions struct {
+ unversioned.TypeMeta
+
+ // Path is the part of URLs that include service endpoints, suffixes,
+ // and parameters to use for the current proxy request to service.
+ // For example, the whole request URL is
+ // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+ // Path is _search?q=user:kimchy.
+ Path string
+}
+
+// OwnerReference contains enough information to let you identify an owning
+// object. Currently, an owning object must be in the same namespace, so there
+// is no namespace field.
+type OwnerReference struct {
+ // API version of the referent.
+ APIVersion string `json:"apiVersion"`
+ // Kind of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name"`
+ // UID of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ UID types.UID `json:"uid"`
+ // If true, this reference points to the managing controller.
+ Controller *bool `json:"controller,omitempty"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+ Kind string `json:"kind,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Name string `json:"name,omitempty"`
+ UID types.UID `json:"uid,omitempty"`
+ APIVersion string `json:"apiVersion,omitempty"`
+ ResourceVersion string `json:"resourceVersion,omitempty"`
+
+ // Optional. If referring to a piece of an object instead of an entire object, this string
+ // should contain information to identify the sub-object. For example, if the object
+ // reference is to a container within a pod, this would take on a value like:
+ // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ // the event) or if no container name is specified "spec.containers[2]" (container with
+ // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ // referencing a part of an object.
+ // TODO: this design is not final and this field is subject to change in the future.
+ FieldPath string `json:"fieldPath,omitempty"`
+}
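A sketch of the FieldPath convention described above, pointing at one container of a pod (names are invented):

ref := ObjectReference{
	Kind:      "Pod",
	Namespace: "default",
	Name:      "web-0",
	FieldPath: "spec.containers{web}", // the container named "web" inside the pod
}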
+
+// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
+type LocalObjectReference struct {
+ //TODO: Add other useful fields. apiVersion, kind, uid?
+ Name string
+}
+
+type SerializedReference struct {
+ unversioned.TypeMeta `json:",inline"`
+ Reference ObjectReference `json:"reference,omitempty"`
+}
+
+type EventSource struct {
+ // Component from which the event is generated.
+ Component string `json:"component,omitempty"`
+ // Host name on which the event is generated.
+ Host string `json:"host,omitempty"`
+}
+
+// Valid values for event types (new types could be added in the future)
+const (
+ // Information only and will not cause any problems
+ EventTypeNormal string = "Normal"
+ // These events are to warn that something might go wrong
+ EventTypeWarning string = "Warning"
+)
+
+// +genclient=true
+
+// Event is a report of an event somewhere in the cluster.
+// TODO: Decide whether to store these separately or with the object they apply to.
+type Event struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Required. The object that this event is about.
+ InvolvedObject ObjectReference `json:"involvedObject,omitempty"`
+
+ // Optional; this should be a short, machine understandable string that gives the reason
+ // for this event being generated. For example, if the event is reporting that a container
+ // can't start, the Reason might be "ImageNotFound".
+ // TODO: provide exact specification for format.
+ Reason string `json:"reason,omitempty"`
+
+ // Optional. A human-readable description of the status of this operation.
+ // TODO: decide on maximum length.
+ Message string `json:"message,omitempty"`
+
+ // Optional. The component reporting this event. Should be a short machine understandable string.
+ Source EventSource `json:"source,omitempty"`
+
+ // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+ FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty"`
+
+ // The time at which the most recent occurrence of this event was recorded.
+ LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty"`
+
+ // The number of times this event has occurred.
+ Count int32 `json:"count,omitempty"`
+
+ // Type of this event (Normal, Warning), new types could be added in the future.
+ Type string `json:"type,omitempty"`
+}
+
+// EventList is a list of events.
+type EventList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Event `json:"items"`
+}
+
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []runtime.Object `json:"items"`
+}
+
+// A type of object that is limited
+type LimitType string
+
+const (
+ // Limit that applies to all pods in a namespace
+ LimitTypePod LimitType = "Pod"
+ // Limit that applies to all containers in a namespace
+ LimitTypeContainer LimitType = "Container"
+)
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind
+type LimitRangeItem struct {
+ // Type of resource that this limit applies to
+ Type LimitType `json:"type,omitempty"`
+ // Max usage constraints on this kind by resource name
+ Max ResourceList `json:"max,omitempty"`
+ // Min usage constraints on this kind by resource name
+ Min ResourceList `json:"min,omitempty"`
+ // Default resource requirement limit value by resource name.
+ Default ResourceList `json:"default,omitempty"`
+ // DefaultRequest resource requirement request value by resource name.
+ DefaultRequest ResourceList `json:"defaultRequest,omitempty"`
+ // MaxLimitRequestRatio represents the max burst value for the named resource
+ MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty"`
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind
+type LimitRangeSpec struct {
+ // Limits is the list of LimitRangeItem objects that are enforced
+ Limits []LimitRangeItem `json:"limits"`
+}
+
+// +genclient=true
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace
+type LimitRange struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the limits enforced
+ Spec LimitRangeSpec `json:"spec,omitempty"`
+}
+
+// LimitRangeList is a list of LimitRange items.
+type LimitRangeList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is a list of LimitRange objects
+ Items []LimitRange `json:"items"`
+}
+
+// The following identify resource constants for Kubernetes object types
+const (
+ // Pods, number
+ ResourcePods ResourceName = "pods"
+ // Services, number
+ ResourceServices ResourceName = "services"
+ // ReplicationControllers, number
+ ResourceReplicationControllers ResourceName = "replicationcontrollers"
+ // ResourceQuotas, number
+ ResourceQuotas ResourceName = "resourcequotas"
+ // ResourceSecrets, number
+ ResourceSecrets ResourceName = "secrets"
+ // ResourceConfigMaps, number
+ ResourceConfigMaps ResourceName = "configmaps"
+ // ResourcePersistentVolumeClaims, number
+ ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
+ // ResourceServicesNodePorts, number
+ ResourceServicesNodePorts ResourceName = "services.nodeports"
+ // ResourceServicesLoadBalancers, number
+ ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
+ // CPU request, in cores. (500m = .5 cores)
+ ResourceRequestsCPU ResourceName = "requests.cpu"
+ // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceRequestsMemory ResourceName = "requests.memory"
+ // CPU limit, in cores. (500m = .5 cores)
+ ResourceLimitsCPU ResourceName = "limits.cpu"
+ // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceLimitsMemory ResourceName = "limits.memory"
+)
+
+// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
+type ResourceQuotaScope string
+
+const (
+ // Match all pod objects where spec.activeDeadlineSeconds is set
+ ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
+ // Match all pod objects where spec.activeDeadlineSeconds is not set
+ ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
+ // Match all pod objects that have best effort quality of service
+ ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
+ // Match all pod objects that do not have best effort quality of service
+ ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+)
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota
+type ResourceQuotaSpec struct {
+ // Hard is the set of desired hard limits for each named resource
+ Hard ResourceList `json:"hard,omitempty"`
+ // A collection of filters that must match each object tracked by a quota.
+ // If not specified, the quota matches all objects.
+ Scopes []ResourceQuotaScope `json:"scopes,omitempty"`
+}
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use
+type ResourceQuotaStatus struct {
+ // Hard is the set of enforced hard limits for each named resource
+ Hard ResourceList `json:"hard,omitempty"`
+ // Used is the current observed total usage of the resource in the namespace
+ Used ResourceList `json:"used,omitempty"`
+}
+
+// +genclient=true
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+type ResourceQuota struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired quota
+ Spec ResourceQuotaSpec `json:"spec,omitempty"`
+
+ // Status defines the actual enforced quota and its current usage
+ Status ResourceQuotaStatus `json:"status,omitempty"`
+}
+
+// ResourceQuotaList is a list of ResourceQuota items
+type ResourceQuotaList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is a list of ResourceQuota objects
+ Items []ResourceQuota `json:"items"`
+}
+
+// +genclient=true
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+type Secret struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN
+ // or a leading dot followed by a valid DNS_SUBDOMAIN.
+ // The serialized form of the secret data is a base64 encoded string,
+ // representing the arbitrary (possibly non-string) data value here.
+ Data map[string][]byte `json:"data,omitempty"`
+
+ // Used to facilitate programmatic handling of secret data.
+ Type SecretType `json:"type,omitempty"`
+}
+
+const MaxSecretSize = 1 * 1024 * 1024
+
+type SecretType string
+
+const (
+ // SecretTypeOpaque is the default; arbitrary user-defined data
+ SecretTypeOpaque SecretType = "Opaque"
+
+ // SecretTypeServiceAccountToken contains a token that identifies a service account to the API
+ //
+ // Required fields:
+ // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
+ // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
+ // - Secret.Data["token"] - a token that identifies the service account to the API
+ SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
+
+ // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+ ServiceAccountNameKey = "kubernetes.io/service-account.name"
+ // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+ ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
+ // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
+ ServiceAccountTokenKey = "token"
+ // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
+ ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
+ // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
+ ServiceAccountRootCAKey = "ca.crt"
+ // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
+ ServiceAccountNamespaceKey = "namespace"
+
+ // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
+ //
+ // Required fields:
+ // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
+ SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
+
+ // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
+ DockerConfigKey = ".dockercfg"
+
+ // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
+ //
+ // Required fields:
+ // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
+ SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
+
+ // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
+ DockerConfigJsonKey = ".dockerconfigjson"
+
+ // SecretTypeBasicAuth contains data needed for basic authentication.
+ //
+ // At least one of the following fields is required:
+ // - Secret.Data["username"] - username used for authentication
+ // - Secret.Data["password"] - password or token needed for authentication
+ SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
+
+ // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
+ BasicAuthUsernameKey = "username"
+ // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
+ BasicAuthPasswordKey = "password"
+
+ // SecretTypeSSHAuth contains data needed for SSH authentication.
+ //
+ // Required field:
+ // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
+ SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
+
+ // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
+ SSHAuthPrivateKey = "ssh-privatekey"
+
+ // SecretTypeTLS contains information about a TLS client or server secret. It
+ // is primarily used with TLS termination of the Ingress resource, but may be
+ // used in other types.
+ //
+ // Required fields:
+ // - Secret.Data["tls.key"] - TLS private key.
+ // - Secret.Data["tls.crt"] - TLS certificate. (An illustrative construction follows this const block.)
+ // TODO: Consider supporting different formats, specifying CA/destinationCA.
+ SecretTypeTLS SecretType = "kubernetes.io/tls"
+
+ // TLSCertKey is the key for tls certificates in a TLS secret.
+ TLSCertKey = "tls.crt"
+ // TLSPrivateKeyKey is the key for the private key field in a TLS secret.
+ TLSPrivateKeyKey = "tls.key"
+)
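A minimal sketch of a TLS secret using the keys above; pemCert and pemKey are assumed to hold PEM-encoded bytes and are not defined here:

tlsSecret := Secret{
	ObjectMeta: ObjectMeta{Name: "example-tls"},
	Type:       SecretTypeTLS,
	Data: map[string][]byte{
		TLSCertKey:       pemCert, // "tls.crt"
		TLSPrivateKeyKey: pemKey,  // "tls.key"
	},
}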
+
+type SecretList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []Secret `json:"items"`
+}
+
+// +genclient=true
+
+// ConfigMap holds configuration data for components or applications to consume.
+type ConfigMap struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // Data contains the configuration data.
+ // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.
+ Data map[string]string `json:"data,omitempty"`
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+type ConfigMapList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of ConfigMaps.
+ Items []ConfigMap `json:"items"`
+}
+
+// These constants are for remote command execution and port forwarding and are
+// used by both the client side and server side components.
+//
+// This is probably not the ideal place for them, but it didn't seem worth it
+// to create pkg/exec and pkg/portforward just to contain a single file with
+// constants in it. Suggestions for more appropriate alternatives are
+// definitely welcome!
+const (
+ // Enable stdin for remote command execution
+ ExecStdinParam = "input"
+ // Enable stdout for remote command execution
+ ExecStdoutParam = "output"
+ // Enable stderr for remote command execution
+ ExecStderrParam = "error"
+ // Enable TTY for remote command execution
+ ExecTTYParam = "tty"
+ // Command to run for remote command execution
+ ExecCommandParamm = "command"
+
+ // Name of header that specifies stream type
+ StreamType = "streamType"
+ // Value for streamType header for stdin stream
+ StreamTypeStdin = "stdin"
+ // Value for streamType header for stdout stream
+ StreamTypeStdout = "stdout"
+ // Value for streamType header for stderr stream
+ StreamTypeStderr = "stderr"
+ // Value for streamType header for data stream
+ StreamTypeData = "data"
+ // Value for streamType header for error stream
+ StreamTypeError = "error"
+
+ // Name of header that specifies the port being forwarded
+ PortHeader = "port"
+ // Name of header that specifies a request ID used to associate the error
+ // and data streams for a single forwarded connection
+ PortForwardRequestIDHeader = "requestID"
+)
+
+// Like the constants above, these support HTTP PATCH and are used by both the
+// client and the server; they did not seem to warrant a dedicated package.
+type PatchType string
+
+const (
+ JSONPatchType PatchType = "application/json-patch+json"
+ MergePatchType PatchType = "application/merge-patch+json"
+ StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
+)
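The content type tells the server how to interpret the patch body; a merge patch that adds one label might look like this (sketch only):

patchBody := []byte(`{"metadata":{"labels":{"env":"prod"}}}`)
contentType := string(MergePatchType) // "application/merge-patch+json"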
+
+// Type and constants for component health validation.
+type ComponentConditionType string
+
+// These are the valid conditions for the component.
+const (
+ ComponentHealthy ComponentConditionType = "Healthy"
+)
+
+type ComponentCondition struct {
+ Type ComponentConditionType `json:"type"`
+ Status ConditionStatus `json:"status"`
+ Message string `json:"message,omitempty"`
+ Error string `json:"error,omitempty"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+type ComponentStatus struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+
+ Conditions []ComponentCondition `json:"conditions,omitempty"`
+}
+
+type ComponentStatusList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []ComponentStatus `json:"items"`
+}
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext. When both
+// are set, the values in SecurityContext take precedence.
+type SecurityContext struct {
+ // The capabilities to add/drop when running containers.
+ // Defaults to the default set of capabilities granted by the container runtime.
+ Capabilities *Capabilities `json:"capabilities,omitempty"`
+ // Run container in privileged mode.
+ // Processes in privileged containers are essentially equivalent to root on the host.
+ // Defaults to false.
+ Privileged *bool `json:"privileged,omitempty"`
+ // The SELinux context to be applied to the container.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsUser *int64 `json:"runAsUser,omitempty"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
+ // The read-only root filesystem allows you to restrict the locations that an application can write
+ // files to, ensuring the persistent data can only be written to mounts.
+ ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"`
+}
+
+// SELinuxOptions are the labels to be applied to the container.
+type SELinuxOptions struct {
+ // SELinux user label
+ User string `json:"user,omitempty"`
+ // SELinux role label
+ Role string `json:"role,omitempty"`
+ // SELinux type label
+ Type string `json:"type,omitempty"`
+ // SELinux level label.
+ Level string `json:"level,omitempty"`
+}
+
+// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record
+// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range
+// should be a string representation of the inputs to a range (for instance, for IP allocation it
+// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a
+// binary range. Consumers should use annotations to record additional information (schema version,
+// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation
+// of the cluster, thus the object is less strongly typed than most.
+type RangeAllocation struct {
+ unversioned.TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+ // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or
+ // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define
+ // a start and end unless there is an implicit end.
+ Range string `json:"range"`
+ // A byte array representing the serialized state of a range allocation. Additional clarifiers on
+ // the type or format of data should be represented with annotations. For IP allocations, this is
+ // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing
+ // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4).
+ Data []byte `json:"data"`
+}
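A sketch of reading the bit array described above; the byte/bit ordering used here is an assumption for illustration, not a statement of the allocator's actual encoding:

// allocated reports whether the i'th address offset in Range is marked as
// allocated in Data (bit i of the array, least-significant bit first per byte).
func allocated(ra RangeAllocation, i int) bool {
	byteIdx, bitIdx := i/8, uint(i%8)
	if i < 0 || byteIdx >= len(ra.Data) {
		return false
	}
	return ra.Data[byteIdx]&(1<<bitIdx) != 0
}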
+
+const (
+ // "default-scheduler" is the name of default scheduler.
+ DefaultSchedulerName = "default-scheduler"
+
+ // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+ // corresponding to every RequiredDuringScheduling affinity rule.
+ // When the --hard-pod-affinity-weight scheduler flag is not specified,
+ // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+ DefaultHardPodAffinitySymmetricWeight int = 1
+
+ // When the --failure-domains scheduler flag is not specified,
+ // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
+ DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go
new file mode 100644
index 0000000..77f5b66
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go
@@ -0,0 +1,288 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package unversioned
+
+import (
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ time "time"
+)
+
+func DeepCopy_unversioned_APIGroup(in APIGroup, out *APIGroup, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Name = in.Name
+ if in.Versions != nil {
+ in, out := in.Versions, &out.Versions
+ *out = make([]GroupVersionForDiscovery, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Versions = nil
+ }
+ out.PreferredVersion = in.PreferredVersion
+ if in.ServerAddressByClientCIDRs != nil {
+ in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
+ *out = make([]ServerAddressByClientCIDR, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.ServerAddressByClientCIDRs = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_APIGroupList(in APIGroupList, out *APIGroupList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]APIGroup, len(in))
+ for i := range in {
+ if err := DeepCopy_unversioned_APIGroup(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Groups = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_APIResource(in APIResource, out *APIResource, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Namespaced = in.Namespaced
+ out.Kind = in.Kind
+ return nil
+}
+
+func DeepCopy_unversioned_APIResourceList(in APIResourceList, out *APIResourceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.GroupVersion = in.GroupVersion
+ if in.APIResources != nil {
+ in, out := in.APIResources, &out.APIResources
+ *out = make([]APIResource, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.APIResources = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_APIVersions(in APIVersions, out *APIVersions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if in.Versions != nil {
+ in, out := in.Versions, &out.Versions
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Versions = nil
+ }
+ if in.ServerAddressByClientCIDRs != nil {
+ in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
+ *out = make([]ServerAddressByClientCIDR, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.ServerAddressByClientCIDRs = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_Duration(in Duration, out *Duration, c *conversion.Cloner) error {
+ out.Duration = in.Duration
+ return nil
+}
+
+func DeepCopy_unversioned_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func DeepCopy_unversioned_GroupKind(in GroupKind, out *GroupKind, c *conversion.Cloner) error {
+ out.Group = in.Group
+ out.Kind = in.Kind
+ return nil
+}
+
+func DeepCopy_unversioned_GroupResource(in GroupResource, out *GroupResource, c *conversion.Cloner) error {
+ out.Group = in.Group
+ out.Resource = in.Resource
+ return nil
+}
+
+func DeepCopy_unversioned_GroupVersion(in GroupVersion, out *GroupVersion, c *conversion.Cloner) error {
+ out.Group = in.Group
+ out.Version = in.Version
+ return nil
+}
+
+func DeepCopy_unversioned_GroupVersionForDiscovery(in GroupVersionForDiscovery, out *GroupVersionForDiscovery, c *conversion.Cloner) error {
+ out.GroupVersion = in.GroupVersion
+ out.Version = in.Version
+ return nil
+}
+
+func DeepCopy_unversioned_GroupVersionKind(in GroupVersionKind, out *GroupVersionKind, c *conversion.Cloner) error {
+ out.Group = in.Group
+ out.Version = in.Version
+ out.Kind = in.Kind
+ return nil
+}
+
+func DeepCopy_unversioned_GroupVersionResource(in GroupVersionResource, out *GroupVersionResource, c *conversion.Cloner) error {
+ out.Group = in.Group
+ out.Version = in.Version
+ out.Resource = in.Resource
+ return nil
+}
+
+func DeepCopy_unversioned_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error {
+ if in.MatchLabels != nil {
+ in, out := in.MatchLabels, &out.MatchLabels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.MatchLabels = nil
+ }
+ if in.MatchExpressions != nil {
+ in, out := in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(in))
+ for i := range in {
+ if err := DeepCopy_unversioned_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ if in.Values != nil {
+ in, out := in.Values, &out.Values
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Values = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_ListMeta(in ListMeta, out *ListMeta, c *conversion.Cloner) error {
+ out.SelfLink = in.SelfLink
+ out.ResourceVersion = in.ResourceVersion
+ return nil
+}
+
+func DeepCopy_unversioned_Patch(in Patch, out *Patch, c *conversion.Cloner) error {
+ return nil
+}
+
+func DeepCopy_unversioned_RootPaths(in RootPaths, out *RootPaths, c *conversion.Cloner) error {
+ if in.Paths != nil {
+ in, out := in.Paths, &out.Paths
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Paths = nil
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error {
+ out.ClientCIDR = in.ClientCIDR
+ out.ServerAddress = in.ServerAddress
+ return nil
+}
+
+func DeepCopy_unversioned_Status(in Status, out *Status, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ out.Status = in.Status
+ out.Message = in.Message
+ out.Reason = in.Reason
+ if in.Details != nil {
+ in, out := in.Details, &out.Details
+ *out = new(StatusDetails)
+ if err := DeepCopy_unversioned_StatusDetails(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Details = nil
+ }
+ out.Code = in.Code
+ return nil
+}
+
+func DeepCopy_unversioned_StatusCause(in StatusCause, out *StatusCause, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Message = in.Message
+ out.Field = in.Field
+ return nil
+}
+
+func DeepCopy_unversioned_StatusDetails(in StatusDetails, out *StatusDetails, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Group = in.Group
+ out.Kind = in.Kind
+ if in.Causes != nil {
+ in, out := in.Causes, &out.Causes
+ *out = make([]StatusCause, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Causes = nil
+ }
+ out.RetryAfterSeconds = in.RetryAfterSeconds
+ return nil
+}
+
+func DeepCopy_unversioned_Time(in Time, out *Time, c *conversion.Cloner) error {
+ if newVal, err := c.DeepCopy(in.Time); err != nil {
+ return err
+ } else {
+ out.Time = newVal.(time.Time)
+ }
+ return nil
+}
+
+func DeepCopy_unversioned_Timestamp(in Timestamp, out *Timestamp, c *conversion.Cloner) error {
+ out.Seconds = in.Seconds
+ out.Nanos = in.Nanos
+ return nil
+}
+
+func DeepCopy_unversioned_TypeMeta(in TypeMeta, out *TypeMeta, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.APIVersion = in.APIVersion
+ return nil
+}
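Usage sketch for the generated helpers above; src is a hypothetical LabelSelector and conversion.NewCloner is assumed to be available in the vendored conversion package:

cloner := conversion.NewCloner()
var dst LabelSelector
if err := DeepCopy_unversioned_LabelSelector(src, &dst, cloner); err != nil {
	// handle the error; dst may be partially populated
}
// dst now owns independent copies of MatchLabels and MatchExpressions.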
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go
new file mode 100644
index 0000000..d0ffc33
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package unversioned
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go
new file mode 100644
index 0000000..ed54e51
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Duration is a wrapper around time.Duration which supports correct
+// marshaling to YAML and JSON. In particular, it marshals into strings, which
+// can be used as map keys in json.
+type Duration struct {
+ time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+ var str string
+ if err := json.Unmarshal(b, &str); err != nil {
+ return err
+ }
+
+ pd, err := time.ParseDuration(str)
+ if err != nil {
+ return err
+ }
+ d.Duration = pd
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(d.Duration.String())
+}
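
As the doc comment notes, Duration marshals to and from the time.Duration string form. A minimal round-trip sketch, assuming the vendored k8s.io/kubernetes/pkg/api/unversioned import path:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	d := unversioned.Duration{Duration: 90 * time.Second}

	// MarshalJSON emits the time.Duration string form.
	b, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "1m30s"

	// UnmarshalJSON parses the string back with time.ParseDuration.
	var out unversioned.Duration
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Duration == d.Duration) // true
}
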
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go
new file mode 100644
index 0000000..f0d7147
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go
@@ -0,0 +1,4212 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/api/unversioned/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package unversioned is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/api/unversioned/generated.proto
+
+ It has these top-level messages:
+ APIGroup
+ APIGroupList
+ APIResource
+ APIResourceList
+ APIVersions
+ Duration
+ ExportOptions
+ GroupKind
+ GroupResource
+ GroupVersion
+ GroupVersionForDiscovery
+ GroupVersionKind
+ GroupVersionResource
+ LabelSelector
+ LabelSelectorRequirement
+ ListMeta
+ RootPaths
+ ServerAddressByClientCIDR
+ Status
+ StatusCause
+ StatusDetails
+ Time
+ Timestamp
+ TypeMeta
+*/
+package unversioned
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import time "time"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *APIGroup) Reset() { *m = APIGroup{} }
+func (m *APIGroup) String() string { return proto.CompactTextString(m) }
+func (*APIGroup) ProtoMessage() {}
+
+func (m *APIGroupList) Reset() { *m = APIGroupList{} }
+func (m *APIGroupList) String() string { return proto.CompactTextString(m) }
+func (*APIGroupList) ProtoMessage() {}
+
+func (m *APIResource) Reset() { *m = APIResource{} }
+func (m *APIResource) String() string { return proto.CompactTextString(m) }
+func (*APIResource) ProtoMessage() {}
+
+func (m *APIResourceList) Reset() { *m = APIResourceList{} }
+func (m *APIResourceList) String() string { return proto.CompactTextString(m) }
+func (*APIResourceList) ProtoMessage() {}
+
+func (m *APIVersions) Reset() { *m = APIVersions{} }
+func (*APIVersions) ProtoMessage() {}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+
+func (m *ExportOptions) Reset() { *m = ExportOptions{} }
+func (m *ExportOptions) String() string { return proto.CompactTextString(m) }
+func (*ExportOptions) ProtoMessage() {}
+
+func (m *GroupKind) Reset() { *m = GroupKind{} }
+func (*GroupKind) ProtoMessage() {}
+
+func (m *GroupResource) Reset() { *m = GroupResource{} }
+func (*GroupResource) ProtoMessage() {}
+
+func (m *GroupVersion) Reset() { *m = GroupVersion{} }
+func (*GroupVersion) ProtoMessage() {}
+
+func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
+func (m *GroupVersionForDiscovery) String() string { return proto.CompactTextString(m) }
+func (*GroupVersionForDiscovery) ProtoMessage() {}
+
+func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
+func (*GroupVersionKind) ProtoMessage() {}
+
+func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
+func (*GroupVersionResource) ProtoMessage() {}
+
+func (m *LabelSelector) Reset() { *m = LabelSelector{} }
+func (m *LabelSelector) String() string { return proto.CompactTextString(m) }
+func (*LabelSelector) ProtoMessage() {}
+
+func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
+func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) }
+func (*LabelSelectorRequirement) ProtoMessage() {}
+
+func (m *ListMeta) Reset() { *m = ListMeta{} }
+func (m *ListMeta) String() string { return proto.CompactTextString(m) }
+func (*ListMeta) ProtoMessage() {}
+
+func (m *RootPaths) Reset() { *m = RootPaths{} }
+func (m *RootPaths) String() string { return proto.CompactTextString(m) }
+func (*RootPaths) ProtoMessage() {}
+
+func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
+func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) }
+func (*ServerAddressByClientCIDR) ProtoMessage() {}
+
+func (m *Status) Reset() { *m = Status{} }
+func (m *Status) String() string { return proto.CompactTextString(m) }
+func (*Status) ProtoMessage() {}
+
+func (m *StatusCause) Reset() { *m = StatusCause{} }
+func (m *StatusCause) String() string { return proto.CompactTextString(m) }
+func (*StatusCause) ProtoMessage() {}
+
+func (m *StatusDetails) Reset() { *m = StatusDetails{} }
+func (m *StatusDetails) String() string { return proto.CompactTextString(m) }
+func (*StatusDetails) ProtoMessage() {}
+
+func (m *Time) Reset() { *m = Time{} }
+func (m *Time) String() string { return proto.CompactTextString(m) }
+func (*Time) ProtoMessage() {}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+
+func (m *TypeMeta) Reset() { *m = TypeMeta{} }
+func (m *TypeMeta) String() string { return proto.CompactTextString(m) }
+func (*TypeMeta) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*APIGroup)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroup")
+ proto.RegisterType((*APIGroupList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroupList")
+ proto.RegisterType((*APIResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResource")
+ proto.RegisterType((*APIResourceList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResourceList")
+ proto.RegisterType((*APIVersions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIVersions")
+ proto.RegisterType((*Duration)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Duration")
+ proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ExportOptions")
+ proto.RegisterType((*GroupKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupKind")
+ proto.RegisterType((*GroupResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupResource")
+ proto.RegisterType((*GroupVersion)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersion")
+ proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionForDiscovery")
+ proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionKind")
+ proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionResource")
+ proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelector")
+ proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelectorRequirement")
+ proto.RegisterType((*ListMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ListMeta")
+ proto.RegisterType((*RootPaths)(nil), "k8s.io.kubernetes.pkg.api.unversioned.RootPaths")
+ proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ServerAddressByClientCIDR")
+ proto.RegisterType((*Status)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Status")
+ proto.RegisterType((*StatusCause)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusCause")
+ proto.RegisterType((*StatusDetails)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusDetails")
+ proto.RegisterType((*Time)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Time")
+ proto.RegisterType((*Timestamp)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Timestamp")
+ proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.TypeMeta")
+}
+func (m *APIGroup) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *APIGroup) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ if len(m.Versions) > 0 {
+ for _, msg := range m.Versions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size()))
+ n1, err := m.PreferredVersion.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ if len(m.ServerAddressByClientCIDRs) > 0 {
+ for _, msg := range m.ServerAddressByClientCIDRs {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *APIGroupList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *APIGroupList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Groups) > 0 {
+ for _, msg := range m.Groups {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *APIResource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *APIResource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x10
+ i++
+ if m.Namespaced {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ return i, nil
+}
+
+func (m *APIResourceList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *APIResourceList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion)))
+ i += copy(data[i:], m.GroupVersion)
+ if len(m.APIResources) > 0 {
+ for _, msg := range m.APIResources {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *APIVersions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *APIVersions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for _, s := range m.Versions {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.ServerAddressByClientCIDRs) > 0 {
+ for _, msg := range m.ServerAddressByClientCIDRs {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Duration) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Duration) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Duration))
+ return i, nil
+}
+
+func (m *ExportOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ExportOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.Export {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x10
+ i++
+ if m.Exact {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *GroupKind) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GroupKind) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Group)))
+ i += copy(data[i:], m.Group)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ return i, nil
+}
+
+func (m *GroupResource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GroupResource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Group)))
+ i += copy(data[i:], m.Group)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Resource)))
+ i += copy(data[i:], m.Resource)
+ return i, nil
+}
+
+func (m *GroupVersion) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GroupVersion) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Group)))
+ i += copy(data[i:], m.Group)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Version)))
+ i += copy(data[i:], m.Version)
+ return i, nil
+}
+
+func (m *GroupVersionForDiscovery) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GroupVersionForDiscovery) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion)))
+ i += copy(data[i:], m.GroupVersion)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Version)))
+ i += copy(data[i:], m.Version)
+ return i, nil
+}
+
+func (m *GroupVersionKind) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GroupVersionKind) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Group)))
+ i += copy(data[i:], m.Group)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Version)))
+ i += copy(data[i:], m.Version)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ return i, nil
+}
+
+func (m *GroupVersionResource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GroupVersionResource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Group)))
+ i += copy(data[i:], m.Group)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Version)))
+ i += copy(data[i:], m.Version)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Resource)))
+ i += copy(data[i:], m.Resource)
+ return i, nil
+}
+
+func (m *LabelSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k := range m.MatchLabels {
+ data[i] = 0xa
+ i++
+ v := m.MatchLabels[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, msg := range m.MatchExpressions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Operator)))
+ i += copy(data[i:], m.Operator)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ListMeta) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListMeta) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink)))
+ i += copy(data[i:], m.SelfLink)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion)))
+ i += copy(data[i:], m.ResourceVersion)
+ return i, nil
+}
+
+func (m *RootPaths) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RootPaths) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, s := range m.Paths {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR)))
+ i += copy(data[i:], m.ClientCIDR)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress)))
+ i += copy(data[i:], m.ServerAddress)
+ return i, nil
+}
+
+func (m *Status) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Status) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n2, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ if m.Details != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Details.Size()))
+ n3, err := m.Details.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ data[i] = 0x30
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Code))
+ return i, nil
+}
+
+func (m *StatusCause) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *StatusCause) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Field)))
+ i += copy(data[i:], m.Field)
+ return i, nil
+}
+
+func (m *StatusDetails) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *StatusDetails) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Group)))
+ i += copy(data[i:], m.Group)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ if len(m.Causes) > 0 {
+ for _, msg := range m.Causes {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RetryAfterSeconds))
+ return i, nil
+}
+
+func (m *Timestamp) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Timestamp) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Seconds))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Nanos))
+ return i, nil
+}
+
+func (m *TypeMeta) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TypeMeta) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
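
The encoders above write the protobuf wire format by hand: each field starts with a tag byte equal to (fieldNumber << 3) | wireType — 0xa is field 1 with wire type 2 (length-delimited), 0x10 is field 2 with wire type 0 (varint), 0x1a is field 3 with wire type 2 — and encodeVarintGenerated/sovGenerated emit and size unsigned integers as little-endian base-128 groups whose high bit flags a continuation byte. A standalone sketch of the same varint scheme, for illustration only:

package main

import "fmt"

// putUvarint mirrors encodeVarintGenerated: emit 7 bits at a time,
// setting the high bit on every byte except the last.
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 0, 300)
	// 300 = 0b1_0010_1100 -> low 7 bits 0x2c plus continuation bit (0xac),
	// then the remaining 0b10 (0x02). Two bytes, matching sovGenerated(300).
	fmt.Printf("% x\n", buf[:n]) // ac 02
}
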
+func (m *APIGroup) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.PreferredVersion.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ServerAddressByClientCIDRs) > 0 {
+ for _, e := range m.ServerAddressByClientCIDRs {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *APIGroupList) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Groups) > 0 {
+ for _, e := range m.Groups {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *APIResource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *APIResourceList) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.GroupVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.APIResources) > 0 {
+ for _, e := range m.APIResources {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *APIVersions) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Versions) > 0 {
+ for _, s := range m.Versions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ServerAddressByClientCIDRs) > 0 {
+ for _, e := range m.ServerAddressByClientCIDRs {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Duration) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Duration))
+ return n
+}
+
+func (m *ExportOptions) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ return n
+}
+
+func (m *GroupKind) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GroupResource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GroupVersion) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GroupVersionForDiscovery) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.GroupVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GroupVersionKind) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GroupVersionResource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *LabelSelector) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k, v := range m.MatchLabels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, e := range m.MatchExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelSelectorRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ListMeta) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SelfLink)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RootPaths) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, s := range m.Paths {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServerAddressByClientCIDR) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ClientCIDR)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ServerAddress)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Status) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Details != nil {
+ l = m.Details.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Code))
+ return n
+}
+
+func (m *StatusCause) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Field)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StatusDetails) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Causes) > 0 {
+ for _, e := range m.Causes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.RetryAfterSeconds))
+ return n
+}
+
+func (m *Timestamp) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Seconds))
+ n += 1 + sovGenerated(uint64(m.Nanos))
+ return n
+}
+
+func (m *TypeMeta) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *APIGroup) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIGroup: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, GroupVersionForDiscovery{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PreferredVersion.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{})
+ if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIGroupList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, APIGroup{})
+ if err := m.Groups[len(m.Groups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIResource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIResource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Namespaced = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIResourceList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GroupVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIResources = append(m.APIResources, APIResource{})
+ if err := m.APIResources[len(m.APIResources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIVersions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIVersions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{})
+ if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Duration) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Duration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
+ }
+ m.Duration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Duration |= (time.Duration(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExportOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Export = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Exact = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupKind) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupKind: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupResource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupResource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupVersion) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GroupVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupVersionKind) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupVersionResource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.MatchLabels == nil {
+ m.MatchLabels = make(map[string]string)
+ }
+ m.MatchLabels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{})
+ if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelectorRequirement) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = LabelSelectorOperator(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListMeta) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListMeta: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SelfLink = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RootPaths) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RootPaths: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Paths = append(m.Paths, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientCIDR = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServerAddress = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Status) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Status: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = StatusReason(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Details == nil {
+ m.Details = &StatusDetails{}
+ }
+ if err := m.Details.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
+ }
+ m.Code = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Code |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusCause) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = CauseType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Field = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusDetails) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Causes = append(m.Causes, StatusCause{})
+ if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType)
+ }
+ m.RetryAfterSeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Timestamp) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
+ }
+ m.Seconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Seconds |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
+ }
+ m.Nanos = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Nanos |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TypeMeta) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto
new file mode 100644
index 0000000..bb4bf4a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto
@@ -0,0 +1,377 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.api.unversioned;
+
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "unversioned";
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+message APIGroup {
+ // name is the name of the group.
+ optional string name = 1;
+
+ // versions are the versions supported in this group.
+ repeated GroupVersionForDiscovery versions = 2;
+
+ // preferredVersion is the version preferred by the API server, which
+ // probably is the storage version.
+ optional GroupVersionForDiscovery preferredVersion = 3;
+
+ // a map of client CIDR to server address that is serving this group.
+ // This is to help clients reach servers in the most network-efficient way possible.
+ // Clients can use the appropriate server address as per the CIDR that they match.
+ // In case of multiple matches, clients should use the longest matching CIDR.
+ // The server returns only those CIDRs that it thinks that the client can match.
+ // For example: the master will return an internal IP CIDR only if the client reaches the server using an internal IP.
+ // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
+}
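The serverAddressByClientCIDRs contract described above leaves the choice to the client: test the client IP against each CIDR and prefer the entry with the longest matching prefix. The sketch below illustrates only that selection rule; the type and field names are local stand-ins, not the vendored API.

package main

import (
	"fmt"
	"net"
)

type serverAddressByClientCIDR struct {
	ClientCIDR    string
	ServerAddress string
}

// pickServerAddress returns the address of the entry whose CIDR contains
// clientIP with the longest prefix, or false if no entry matches.
func pickServerAddress(clientIP net.IP, entries []serverAddressByClientCIDR) (string, bool) {
	best, bestBits, found := "", -1, false
	for _, e := range entries {
		_, ipNet, err := net.ParseCIDR(e.ClientCIDR)
		if err != nil || !ipNet.Contains(clientIP) {
			continue
		}
		if ones, _ := ipNet.Mask.Size(); ones > bestBits {
			best, bestBits, found = e.ServerAddress, ones, true
		}
	}
	return best, found
}

func main() {
	entries := []serverAddressByClientCIDR{
		{ClientCIDR: "0.0.0.0/0", ServerAddress: "203.0.113.10:443"},
		{ClientCIDR: "10.0.0.0/8", ServerAddress: "10.0.0.1:443"},
	}
	addr, ok := pickServerAddress(net.ParseIP("10.1.2.3"), entries)
	fmt.Println(addr, ok) // 10.0.0.1:443 true, the longest matching CIDR wins
}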
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+message APIGroupList {
+ // groups is a list of APIGroup.
+ repeated APIGroup groups = 1;
+}
+
+// APIResource specifies the name of a resource and whether it is namespaced.
+message APIResource {
+ // name is the name of the resource.
+ optional string name = 1;
+
+ // namespaced indicates if a resource is namespaced or not.
+ optional bool namespaced = 2;
+
+ // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
+ optional string kind = 3;
+}
+
+// APIResourceList is a list of APIResource, it is used to expose the name of the
+// resources supported in a specific group and version, and if the resource
+// is namespaced.
+message APIResourceList {
+ // groupVersion is the group and version this APIResourceList is for.
+ optional string groupVersion = 1;
+
+ // resources contains the name of the resources and if they are namespaced.
+ repeated APIResource resources = 2;
+}
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message APIVersions {
+ // versions are the api versions that are available.
+ repeated string versions = 1;
+
+ // a map of client CIDR to server address that is serving this group.
+ // This is to help clients reach servers in the most network-efficient way possible.
+ // Clients can use the appropriate server address as per the CIDR that they match.
+ // In case of multiple matches, clients should use the longest matching CIDR.
+ // The server returns only those CIDRs that it thinks that the client can match.
+ // For example: the master will return an internal IP CIDR only if the client reaches the server using an internal IP.
+ // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
+}
+
+// Duration is a wrapper around time.Duration which supports correct
+// marshaling to YAML and JSON. In particular, it marshals into strings, which
+// can be used as map keys in json.
+message Duration {
+ optional int64 duration = 1;
+}
+
+// ExportOptions is the query options to the standard REST get call.
+message ExportOptions {
+ // Should this value be exported. Export strips fields that a user cannot specify.
+ optional bool export = 1;
+
+ // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
+ optional bool exact = 2;
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupKind {
+ optional string group = 1;
+
+ optional string kind = 2;
+}
+
+// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupResource {
+ optional string group = 1;
+
+ optional string resource = 2;
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersion {
+ optional string group = 1;
+
+ optional string version = 2;
+}
+
+// GroupVersionForDiscovery contains the "group/version" and "version" string of a version.
+// It is made a struct to keep extensibility.
+message GroupVersionForDiscovery {
+ // groupVersion specifies the API group and version in the form "group/version"
+ optional string groupVersion = 1;
+
+ // version specifies the version in the form of "version". This is to save
+ // the clients the trouble of splitting the GroupVersion.
+ optional string version = 2;
+}
+
+// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion
+// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersionKind {
+ optional string group = 1;
+
+ optional string version = 2;
+
+ optional string kind = 3;
+}
+
+// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
+// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersionResource {
+ optional string group = 1;
+
+ optional string version = 2;
+
+ optional string resource = 3;
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ map<string, string> matchLabels = 1;
+
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ repeated LabelSelectorRequirement matchExpressions = 2;
+}
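The equivalence stated in the matchLabels comment can be made concrete: each {key,value} pair behaves like a matchExpressions requirement whose operator is "In" with a single value, and all requirements are ANDed. A minimal sketch under that reading, using local stand-in types rather than the vendored ones:

package main

import "fmt"

// requirement mirrors the shape of a matchExpressions entry for illustration.
type requirement struct {
	Key      string
	Operator string
	Values   []string
}

// asExpressions rewrites matchLabels entries as equivalent "In" requirements.
func asExpressions(matchLabels map[string]string) []requirement {
	reqs := make([]requirement, 0, len(matchLabels))
	for k, v := range matchLabels {
		reqs = append(reqs, requirement{Key: k, Operator: "In", Values: []string{v}})
	}
	return reqs
}

func main() {
	// {"app": "nginx"} selects the same objects as
	// {key: app, operator: In, values: [nginx]}.
	fmt.Printf("%+v\n", asExpressions(map[string]string{"app": "nginx"}))
}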
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+ // key is the label key that the selector applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ optional string operator = 2;
+
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ repeated string values = 3;
+}
+
+// ListMeta describes metadata that synthetic resources must have, including lists and
+// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+message ListMeta {
+ // SelfLink is a URL representing this object.
+ // Populated by the system.
+ // Read-only.
+ optional string selfLink = 1;
+
+ // String that identifies the server's internal version of this object that
+ // can be used by clients to determine when objects have changed.
+ // Value must be treated as opaque by clients and passed unmodified back to the server.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+ optional string resourceVersion = 2;
+}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+message RootPaths {
+ // paths are the paths available at root.
+ repeated string paths = 1;
+}
+
+// ServerAddressByClientCIDR helps clients determine the server address that they should use, depending on the clientCIDR that they match.
+message ServerAddressByClientCIDR {
+ // The CIDR with which clients can match their IP to figure out the server address that they should use.
+ optional string clientCIDR = 1;
+
+ // Address of this server, suitable for a client that matches the above CIDR.
+ // This can be a hostname, hostname:port, IP or IP:port.
+ optional string serverAddress = 2;
+}
+
+// Status is a return value for calls that don't return other objects.
+message Status {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional ListMeta metadata = 1;
+
+ // Status of the operation.
+ // One of: "Success" or "Failure".
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional string status = 2;
+
+ // A human-readable description of the status of this operation.
+ optional string message = 3;
+
+ // A machine-readable description of why this operation is in the
+ // "Failure" status. If this value is empty there
+ // is no information available. A Reason clarifies an HTTP status
+ // code but does not override it.
+ optional string reason = 4;
+
+ // Extended data associated with the reason. Each reason may define its
+ // own extended details. This field is optional and the data returned
+ // is not guaranteed to conform to any schema except that defined by
+ // the reason type.
+ optional StatusDetails details = 5;
+
+ // Suggested HTTP return code for this status, 0 if not set.
+ optional int32 code = 6;
+}
+
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+message StatusCause {
+ // A machine-readable description of the cause of the error. If this value is
+ // empty there is no information available.
+ optional string reason = 1;
+
+ // A human-readable description of the cause of the error. This field may be
+ // presented as-is to a reader.
+ optional string message = 2;
+
+ // The field of the resource that has caused this error, as named by its JSON
+ // serialization. May include dot and postfix notation for nested attributes.
+ // Arrays are zero-indexed. Fields may appear more than once in an array of
+ // causes due to fields having multiple errors.
+ // Optional.
+ //
+ // Examples:
+ // "name" - the field "name" on the current resource
+ // "items[0].name" - the field "name" on the first array entry in "items"
+ optional string field = 3;
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or
+// under-defined.
+message StatusDetails {
+ // The name attribute of the resource associated with the status StatusReason
+ // (when there is a single name which can be described).
+ optional string name = 1;
+
+ // The group attribute of the resource associated with the status StatusReason.
+ optional string group = 2;
+
+ // The kind attribute of the resource associated with the status StatusReason.
+ // On some operations may differ from the requested resource Kind.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional string kind = 3;
+
+ // The Causes array includes more details associated with the StatusReason
+ // failure. Not all StatusReasons may provide detailed causes.
+ repeated StatusCause causes = 4;
+
+ // If specified, the time in seconds before the operation should be retried.
+ optional int32 retryAfterSeconds = 5;
+}
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+message Time {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ optional int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive. This field may be limited in precision depending on context.
+ optional int32 nanos = 2;
+}
+
+// Timestamp is a struct that is equivalent to Time, but intended for
+// protobuf marshalling/unmarshalling. It is generated into a serialization
+// that matches Time. Do not use in Go structs.
+message Timestamp {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ optional int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive. This field may be limited in precision depending on context.
+ optional int32 nanos = 2;
+}
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+message TypeMeta {
+ // Kind is a string value representing the REST resource this object represents.
+ // Servers may infer this from the endpoint the client submits requests to.
+ // Cannot be updated.
+ // In CamelCase.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional string kind = 1;
+
+ // APIVersion defines the versioned schema of this representation of an object.
+ // Servers should convert recognized schemas to the latest internal value, and
+ // may reject unrecognized values.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
+ optional string apiVersion = 2;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go
new file mode 100644
index 0000000..8f74789
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go
@@ -0,0 +1,287 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com`
+// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended
+// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then
+// `*GroupVersionResource` is nil.
+// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource`
+func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) {
+ var gvr *GroupVersionResource
+ if strings.Count(arg, ".") >= 2 {
+ s := strings.SplitN(arg, ".", 3)
+ gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]}
+ }
+
+ return gvr, ParseGroupResource(arg)
+}
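Illustrative usage (not part of the vendored file), written as if it lived inside this package, showing how the two return values differ for the example in the doc comment:

func exampleParseResourceArg() {
	gvr, gr := ParseResourceArg("resource.group.com")
	// gvr == &GroupVersionResource{Group: "com", Version: "group", Resource: "resource"}
	// gr  == GroupResource{Group: "group.com", Resource: "resource"}
	_, _ = gvr, gr

	gvr2, gr2 := ParseResourceArg("resource.com")
	// gvr2 == nil, because fewer than two dots leave no version interpretation
	// gr2  == GroupResource{Group: "com", Resource: "resource"}
	_, _ = gvr2, gr2
}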
+
+// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupResource struct {
+ Group string `protobuf:"bytes,1,opt,name=group"`
+ Resource string `protobuf:"bytes,2,opt,name=resource"`
+}
+
+func (gr GroupResource) WithVersion(version string) GroupVersionResource {
+ return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource}
+}
+
+func (gr GroupResource) IsEmpty() bool {
+ return len(gr.Group) == 0 && len(gr.Resource) == 0
+}
+
+func (gr *GroupResource) String() string {
+ if len(gr.Group) == 0 {
+ return gr.Resource
+ }
+ return gr.Resource + "." + gr.Group
+}
+
+// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed
+// for each field.
+func ParseGroupResource(gr string) GroupResource {
+ if i := strings.Index(gr, "."); i == -1 {
+ return GroupResource{Resource: gr}
+ } else {
+ return GroupResource{Group: gr[i+1:], Resource: gr[:i]}
+ }
+}
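Illustrative calls (again assuming in-package code, not part of the vendored file) showing the two shapes ParseGroupResource accepts:

func exampleParseGroupResource() {
	_ = ParseGroupResource("pods")       // GroupResource{Resource: "pods"}
	_ = ParseGroupResource("jobs.batch") // GroupResource{Group: "batch", Resource: "jobs"}
}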
+
+// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
+// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersionResource struct {
+ Group string `protobuf:"bytes,1,opt,name=group"`
+ Version string `protobuf:"bytes,2,opt,name=version"`
+ Resource string `protobuf:"bytes,3,opt,name=resource"`
+}
+
+func (gvr GroupVersionResource) IsEmpty() bool {
+ return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0
+}
+
+func (gvr GroupVersionResource) GroupResource() GroupResource {
+ return GroupResource{Group: gvr.Group, Resource: gvr.Resource}
+}
+
+func (gvr GroupVersionResource) GroupVersion() GroupVersion {
+ return GroupVersion{Group: gvr.Group, Version: gvr.Version}
+}
+
+func (gvr *GroupVersionResource) String() string {
+ return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupKind struct {
+ Group string `protobuf:"bytes,1,opt,name=group"`
+ Kind string `protobuf:"bytes,2,opt,name=kind"`
+}
+
+func (gk GroupKind) IsEmpty() bool {
+ return len(gk.Group) == 0 && len(gk.Kind) == 0
+}
+
+func (gk GroupKind) WithVersion(version string) GroupVersionKind {
+ return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind}
+}
+
+func (gk *GroupKind) String() string {
+ if len(gk.Group) == 0 {
+ return gk.Kind
+ }
+ return gk.Kind + "." + gk.Group
+}
+
+// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion
+// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersionKind struct {
+ Group string `protobuf:"bytes,1,opt,name=group"`
+ Version string `protobuf:"bytes,2,opt,name=version"`
+ Kind string `protobuf:"bytes,3,opt,name=kind"`
+}
+
+// IsEmpty returns true if group, version, and kind are empty
+func (gvk GroupVersionKind) IsEmpty() bool {
+ return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0
+}
+
+func (gvk GroupVersionKind) GroupKind() GroupKind {
+ return GroupKind{Group: gvk.Group, Kind: gvk.Kind}
+}
+
+func (gvk GroupVersionKind) GroupVersion() GroupVersion {
+ return GroupVersion{Group: gvk.Group, Version: gvk.Version}
+}
+
+func (gvk GroupVersionKind) String() string {
+ return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersion struct {
+ Group string `protobuf:"bytes,1,opt,name=group"`
+ Version string `protobuf:"bytes,2,opt,name=version"`
+}
+
+// IsEmpty returns true if group and version are empty
+func (gv GroupVersion) IsEmpty() bool {
+ return len(gv.Group) == 0 && len(gv.Version) == 0
+}
+
+// String puts "group" and "version" into a single "group/version" string. For the legacy v1
+// it returns "v1".
+func (gv GroupVersion) String() string {
+ // special case the internal apiVersion for the legacy kube types
+ if gv.IsEmpty() {
+ return ""
+ }
+
+ // special case of "v1" for backward compatibility
+ if len(gv.Group) == 0 && gv.Version == "v1" {
+ return gv.Version
+ }
+ if len(gv.Group) > 0 {
+ return gv.Group + "/" + gv.Version
+ }
+ return gv.Version
+}
+
+// ParseGroupVersion turns a "group/version" string into a GroupVersion struct. It reports an error
+// if it cannot parse the string.
+func ParseGroupVersion(gv string) (GroupVersion, error) {
+ // this can be the internal version for the legacy kube types
+ // TODO once we've cleared the last uses as strings, this special case should be removed.
+ if (len(gv) == 0) || (gv == "/") {
+ return GroupVersion{}, nil
+ }
+
+ switch strings.Count(gv, "/") {
+ case 0:
+ return GroupVersion{"", gv}, nil
+ case 1:
+ i := strings.Index(gv, "/")
+ return GroupVersion{gv[:i], gv[i+1:]}, nil
+ default:
+ return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv)
+ }
+}
+
+// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind.
+func (gv GroupVersion) WithKind(kind string) GroupVersionKind {
+ return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+}
+
+// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource.
+func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
+ return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource}
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (gv GroupVersion) MarshalJSON() ([]byte, error) {
+ s := gv.String()
+ if strings.Count(s, "/") > 1 {
+ return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s)
+ }
+ return json.Marshal(s)
+}
+
+func (gv *GroupVersion) unmarshal(value []byte) error {
+ var s string
+ if err := json.Unmarshal(value, &s); err != nil {
+ return err
+ }
+ parsed, err := ParseGroupVersion(s)
+ if err != nil {
+ return err
+ }
+ *gv = parsed
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (gv *GroupVersion) UnmarshalJSON(value []byte) error {
+ return gv.unmarshal(value)
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface used by Ugorji's codec.
+func (gv *GroupVersion) UnmarshalText(value []byte) error {
+ return gv.unmarshal(value)
+}
+
+// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that
+// do not use TypeMeta.
+func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) {
+ if gvk == nil {
+ return "", ""
+ }
+ return gvk.GroupVersion().String(), gvk.Kind
+}
+
+// FromAPIVersionAndKind returns a GVK representing the provided fields for types that
+// do not use TypeMeta. This method exists to support test types and legacy serializations
+// that have a distinct group and kind.
+// TODO: further reduce usage of this method.
+func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind {
+ if gv, err := ParseGroupVersion(apiVersion); err == nil {
+ return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+ }
+ return GroupVersionKind{Kind: kind}
+}
+
+// All objects that are serialized from a Scheme encode their type information. This interface is used
+// by serialization to set type information from the Scheme onto the serialized version of an object.
+// For objects that cannot be serialized or have unique requirements, this interface may be a no-op.
+// TODO: this belongs in pkg/runtime, move unversioned.GVK into runtime.
+type ObjectKind interface {
+	// SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing an empty
+	// GroupVersionKind should clear the current setting.
+ SetGroupVersionKind(kind GroupVersionKind)
+ // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does
+ // not expose or provide these fields.
+ GroupVersionKind() GroupVersionKind
+}
+
+// EmptyObjectKind implements the ObjectKind interface as a noop
+// TODO: this belongs in pkg/runtime, move unversioned.GVK into runtime.
+var EmptyObjectKind = emptyObjectKind{}
+
+type emptyObjectKind struct{}
+
+// SetGroupVersionKind implements the ObjectKind interface
+func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {}
+
+// GroupVersionKind implements the ObjectKind interface
+func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} }
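
For orientation, the snippet below is a minimal sketch (not part of the patch) of how the group/version helpers vendored above are typically exercised from Go; the package main wrapper, the literal "apps/v1" and "deployments.v1.apps" inputs, and the commented outputs are illustrative assumptions.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// "apps/v1" splits into a Group and a Version; a bare "v1" keeps an empty Group.
	gv, err := unversioned.ParseGroupVersion("apps/v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(gv.String())               // "apps/v1"
	fmt.Println(gv.WithKind("Deployment")) // "apps/v1, Kind=Deployment"

	// ParseResourceArg returns both plausible readings of a dotted resource string.
	gvr, gr := unversioned.ParseResourceArg("deployments.v1.apps")
	fmt.Println(gvr.Resource, gvr.Version, gvr.Group) // deployments v1 apps
	fmt.Println(gr.String())                          // deployments.v1.apps
}
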
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go
new file mode 100644
index 0000000..fbbff00
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
+// labels.Selector
+// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go
+func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
+ if ps == nil {
+ return labels.Nothing(), nil
+ }
+ if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {
+ return labels.Everything(), nil
+ }
+ selector := labels.NewSelector()
+ for k, v := range ps.MatchLabels {
+ r, err := labels.NewRequirement(k, labels.EqualsOperator, sets.NewString(v))
+ if err != nil {
+ return nil, err
+ }
+ selector = selector.Add(*r)
+ }
+ for _, expr := range ps.MatchExpressions {
+ var op labels.Operator
+ switch expr.Operator {
+ case LabelSelectorOpIn:
+ op = labels.InOperator
+ case LabelSelectorOpNotIn:
+ op = labels.NotInOperator
+ case LabelSelectorOpExists:
+ op = labels.ExistsOperator
+ case LabelSelectorOpDoesNotExist:
+ op = labels.DoesNotExistOperator
+ default:
+ return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator)
+ }
+ r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))
+ if err != nil {
+ return nil, err
+ }
+ selector = selector.Add(*r)
+ }
+ return selector, nil
+}
+
+// ParseToLabelSelector parses a string representing a selector into a LabelSelector object.
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go
+func ParseToLabelSelector(selector string) (*LabelSelector, error) {
+ reqs, err := labels.ParseToRequirements(selector)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err)
+ }
+
+ labelSelector := &LabelSelector{
+ MatchLabels: map[string]string{},
+ MatchExpressions: []LabelSelectorRequirement{},
+ }
+ for _, req := range reqs {
+ var op LabelSelectorOperator
+ switch req.Operator() {
+ case labels.EqualsOperator, labels.DoubleEqualsOperator:
+ vals := req.Values()
+ if vals.Len() != 1 {
+ return nil, fmt.Errorf("equals operator must have exactly one value")
+ }
+ val, ok := vals.PopAny()
+ if !ok {
+ return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved")
+ }
+ labelSelector.MatchLabels[req.Key()] = val
+ continue
+ case labels.InOperator:
+ op = LabelSelectorOpIn
+ case labels.NotInOperator:
+ op = LabelSelectorOpNotIn
+ case labels.ExistsOperator:
+ op = LabelSelectorOpExists
+ case labels.DoesNotExistOperator:
+ op = LabelSelectorOpDoesNotExist
+ case labels.GreaterThanOperator, labels.LessThanOperator:
+ // Adding a separate case for these operators to indicate that this is deliberate
+ return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator())
+ default:
+ return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator())
+ }
+ labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{
+ Key: req.Key(),
+ Operator: op,
+ Values: req.Values().List(),
+ })
+ }
+ return labelSelector, nil
+}
+
+// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object.
+func SetAsLabelSelector(ls labels.Set) *LabelSelector {
+ if ls == nil {
+ return nil
+ }
+
+ selector := &LabelSelector{
+ MatchLabels: make(map[string]string),
+ }
+ for label, value := range ls {
+ selector.MatchLabels[label] = value
+ }
+
+ return selector
+}
+
+// FormatLabelSelector convert labelSelector into plain string
+func FormatLabelSelector(labelSelector *LabelSelector) string {
+ selector, err := LabelSelectorAsSelector(labelSelector)
+ if err != nil {
+ return "<error>"
+ }
+
+ l := selector.String()
+ if len(l) == 0 {
+ l = "<none>"
+ }
+ return l
+}
+
+func ExtractGroupVersions(l *APIGroupList) []string {
+ var groupVersions []string
+ for _, g := range l.Groups {
+ for _, gv := range g.Versions {
+ groupVersions = append(groupVersions, gv.GroupVersion)
+ }
+ }
+ return groupVersions
+}
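
Likewise, a small hedged sketch of the selector helpers above: it builds a structured LabelSelector by hand (the app/tier keys and values are made-up examples) and flattens it with LabelSelectorAsSelector and FormatLabelSelector.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// A structured selector: app=nginx AND tier in (frontend, backend).
	ls := &unversioned.LabelSelector{
		MatchLabels: map[string]string{"app": "nginx"},
		MatchExpressions: []unversioned.LabelSelectorRequirement{
			{Key: "tier", Operator: unversioned.LabelSelectorOpIn, Values: []string{"frontend", "backend"}},
		},
	}

	// LabelSelectorAsSelector flattens it into a labels.Selector usable for matching.
	sel, err := unversioned.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.String()) // e.g. "app=nginx,tier in (backend,frontend)"

	// FormatLabelSelector is the convenience wrapper that never returns an error.
	fmt.Println(unversioned.FormatLabelSelector(ls))
}
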
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go
new file mode 100644
index 0000000..48009da
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+// ListMetaAccessor retrieves the list interface from an object
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type ListMetaAccessor interface {
+ GetListMeta() List
+}
+
+// List lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type List interface {
+ GetResourceVersion() string
+ SetResourceVersion(version string)
+ GetSelfLink() string
+ SetSelfLink(selfLink string)
+}
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type Type interface {
+ GetAPIVersion() string
+ SetAPIVersion(version string)
+ GetKind() string
+ SetKind(kind string)
+}
+
+func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion }
+func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
+func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink }
+func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
+
+func (obj *TypeMeta) GetObjectKind() ObjectKind { return obj }
+
+// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) SetGroupVersionKind(gvk GroupVersionKind) {
+ obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) GroupVersionKind() GroupVersionKind {
+ return FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *ListMeta) GetListMeta() List { return obj }
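
A brief illustrative sketch of the TypeMeta accessors above (not part of the patch), showing the round trip between the apiVersion/kind string fields and a GroupVersionKind; the group, version, and kind values are assumptions.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// TypeMeta round-trips between the apiVersion/kind strings and a GroupVersionKind.
	tm := &unversioned.TypeMeta{}
	tm.SetGroupVersionKind(unversioned.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	fmt.Println(tm.APIVersion, tm.Kind) // apps/v1 Deployment

	gvk := tm.GroupVersionKind()
	fmt.Println(gvk.GroupVersion().String(), gvk.Kind) // apps/v1 Deployment
}
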
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go
new file mode 100644
index 0000000..9af0566
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = GroupVersion{Group: "", Version: ""}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go
new file mode 100644
index 0000000..73b00f2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/google/gofuzz"
+)
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+type Time struct {
+ time.Time `protobuf:"-"`
+}
+
+// DeepCopy returns a deep-copy of the Time value. The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t Time) DeepCopy() Time {
+ return t
+}
+
+// NewTime returns a wrapped instance of the provided time
+func NewTime(time time.Time) Time {
+ return Time{time}
+}
+
+// Date returns the Time corresponding to the supplied parameters
+// by wrapping time.Date.
+func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time {
+ return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// Now returns the current local time.
+func Now() Time {
+ return Time{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *Time) IsZero() bool {
+ if t == nil {
+ return true
+ }
+ return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t Time) Before(u Time) bool {
+ return t.Time.Before(u.Time)
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t Time) Equal(u Time) bool {
+ return t.Time.Equal(u.Time)
+}
+
+// Unix returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func Unix(sec int64, nsec int64) Time {
+ return Time{time.Unix(sec, nsec)}
+}
+
+// Rfc3339Copy returns a copy of the Time at second-level precision.
+func (t Time) Rfc3339Copy() Time {
+ copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339))
+ return Time{copied}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ if len(b) == 4 && string(b) == "null" {
+ t.Time = time.Time{}
+ return nil
+ }
+
+	var str string
+	if err := json.Unmarshal(b, &str); err != nil {
+		return err
+	}
+
+ pt, err := time.Parse(time.RFC3339, str)
+ if err != nil {
+ return err
+ }
+
+ t.Time = pt.Local()
+ return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *Time) UnmarshalQueryParameter(str string) error {
+ if len(str) == 0 {
+ t.Time = time.Time{}
+ return nil
+ }
+ // Tolerate requests from older clients that used JSON serialization to build query params
+ if len(str) == 4 && str == "null" {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ pt, err := time.Parse(time.RFC3339, str)
+ if err != nil {
+ return err
+ }
+
+ t.Time = pt.Local()
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ if t.IsZero() {
+ // Encode unset/nil objects as JSON's "null".
+ return []byte("null"), nil
+ }
+
+ return json.Marshal(t.UTC().Format(time.RFC3339))
+}
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t Time) MarshalQueryParameter() (string, error) {
+ if t.IsZero() {
+ // Encode unset/nil objects as an empty string
+ return "", nil
+ }
+
+ return t.UTC().Format(time.RFC3339), nil
+}
+
+// Fuzz satisfies fuzz.Interface.
+func (t *Time) Fuzz(c fuzz.Continue) {
+ if t == nil {
+ return
+ }
+ // Allow for about 1000 years of randomness. Leave off nanoseconds
+ // because JSON doesn't represent them so they can't round-trip
+ // properly.
+ t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
+}
+
+var _ fuzz.Interface = &Time{}
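
To make the JSON behaviour of the wrapper concrete, here is a minimal sketch with illustrative values (not part of the patch): a zero Time marshals as JSON null, a set Time marshals as an RFC3339 string, and the same string unmarshals back to an equal instant.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// A zero Time marshals to JSON null; a set Time marshals as an RFC3339 string.
	var zero unversioned.Time
	b, _ := json.Marshal(zero)
	fmt.Println(string(b)) // null

	stamp := unversioned.NewTime(time.Date(2016, time.May, 1, 12, 0, 0, 0, time.UTC))
	b, _ = json.Marshal(stamp)
	fmt.Println(string(b)) // "2016-05-01T12:00:00Z"

	// Unmarshalling accepts the same RFC3339 form (and "null" for the zero value).
	var parsed unversioned.Time
	if err := json.Unmarshal(b, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Equal(stamp)) // true
}
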
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go
new file mode 100644
index 0000000..ba25e91
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "time"
+)
+
+// Timestamp is a struct that is equivalent to Time, but intended for
+// protobuf marshalling/unmarshalling. It is generated into a serialization
+// that matches Time. Do not use in Go structs.
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive. This field may be limited in precision depending on context.
+ Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"`
+}
+
+// ProtoTime returns the Time as a new Timestamp value.
+func (m *Time) ProtoTime() *Timestamp {
+ if m == nil {
+ return &Timestamp{}
+ }
+ return &Timestamp{
+ Seconds: m.Time.Unix(),
+ Nanos: int32(m.Time.Nanosecond()),
+ }
+}
+
+// Size implements the protobuf marshalling interface.
+func (m *Time) Size() (n int) {
+ if m == nil || m.Time.IsZero() {
+ return 0
+ }
+ return m.ProtoTime().Size()
+}
+
+// Unmarshal implements the protobuf marshalling interface.
+func (m *Time) Unmarshal(data []byte) error {
+ if len(data) == 0 {
+ m.Time = time.Time{}
+ return nil
+ }
+ p := Timestamp{}
+ if err := p.Unmarshal(data); err != nil {
+ return err
+ }
+ m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
+ return nil
+}
+
+// Marshal implements the protobuf marshalling interface.
+func (m *Time) Marshal() (data []byte, err error) {
+ if m == nil || m.Time.IsZero() {
+ return nil, nil
+ }
+ return m.ProtoTime().Marshal()
+}
+
+// MarshalTo implements the protobuf marshalling interface.
+func (m *Time) MarshalTo(data []byte) (int, error) {
+ if m == nil || m.Time.IsZero() {
+ return 0, nil
+ }
+ return m.ProtoTime().MarshalTo(data)
+}
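
A short hedged sketch of the Time/Timestamp conversion above; the Unix timestamp literal is an arbitrary example value, not taken from the patch.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// ProtoTime splits a Time into whole seconds and nanoseconds for protobuf.
	t := unversioned.NewTime(time.Unix(1462104000, 500000000))
	ts := t.ProtoTime()
	fmt.Println(ts.Seconds, ts.Nanos) // 1462104000 500000000

	// A zero Time contributes an empty protobuf payload (Size() == 0).
	var zero unversioned.Time
	fmt.Println(zero.Size()) // 0
}
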
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go
new file mode 100644
index 0000000..f396487
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go
@@ -0,0 +1,460 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package unversioned contains API types that are common to all versions.
+//
+// The package contains two categories of types:
+// - external (serialized) types that lack their own version (e.g TypeMeta)
+// - internal (never-serialized) types that are needed by several different
+// api groups, and so live here, to avoid duplication and/or import loops
+// (e.g. LabelSelector).
+// In the future, we will probably move these categories of objects into
+// separate packages.
+package unversioned
+
+import "strings"
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+type TypeMeta struct {
+ // Kind is a string value representing the REST resource this object represents.
+ // Servers may infer this from the endpoint the client submits requests to.
+ // Cannot be updated.
+ // In CamelCase.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+
+ // APIVersion defines the versioned schema of this representation of an object.
+ // Servers should convert recognized schemas to the latest internal value, and
+ // may reject unrecognized values.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+}
+
+// ListMeta describes metadata that synthetic resources must have, including lists and
+// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+type ListMeta struct {
+ // SelfLink is a URL representing this object.
+ // Populated by the system.
+ // Read-only.
+ SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"`
+
+ // String that identifies the server's internal version of this object that
+ // can be used by clients to determine when objects have changed.
+ // Value must be treated as opaque by clients and passed unmodified back to the server.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+ ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
+}
+
+// ExportOptions is the query options to the standard REST get call.
+type ExportOptions struct {
+ TypeMeta `json:",inline"`
+	// Should this value be exported. Export strips fields that a user can not specify.
+ Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
+ // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
+ Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
+}
+
+// Status is a return value for calls that don't return other objects.
+type Status struct {
+ TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Status of the operation.
+ // One of: "Success" or "Failure".
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+ // A human-readable description of the status of this operation.
+ Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+ // A machine-readable description of why this operation is in the
+ // "Failure" status. If this value is empty there
+ // is no information available. A Reason clarifies an HTTP status
+ // code but does not override it.
+ Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"`
+ // Extended data associated with the reason. Each reason may define its
+ // own extended details. This field is optional and the data returned
+ // is not guaranteed to conform to any schema except that defined by
+ // the reason type.
+ Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"`
+ // Suggested HTTP return code for this status, 0 if not set.
+ Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"`
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or under
+// defined.
+type StatusDetails struct {
+ // The name attribute of the resource associated with the status StatusReason
+ // (when there is a single name which can be described).
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+ // The group attribute of the resource associated with the status StatusReason.
+ Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+ // The kind attribute of the resource associated with the status StatusReason.
+ // On some operations may differ from the requested resource Kind.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"`
+ // The Causes array includes more details associated with the StatusReason
+ // failure. Not all StatusReasons may provide detailed causes.
+ Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"`
+ // If specified, the time in seconds before the operation should be retried.
+ RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"`
+}
+
+// Values of Status.Status
+const (
+ StatusSuccess = "Success"
+ StatusFailure = "Failure"
+)
+
+// StatusReason is an enumeration of possible failure causes. Each StatusReason
+// must map to a single HTTP status code, but multiple reasons may map
+// to the same HTTP status code.
+// TODO: move to apiserver
+type StatusReason string
+
+const (
+ // StatusReasonUnknown means the server has declined to indicate a specific reason.
+ // The details field may contain other information about this error.
+ // Status code 500.
+ StatusReasonUnknown StatusReason = ""
+
+ // StatusReasonUnauthorized means the server can be reached and understood the request, but requires
+ // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header)
+ // in order for the action to be completed. If the user has specified credentials on the request, the
+ // server considers them insufficient.
+ // Status code 401
+ StatusReasonUnauthorized StatusReason = "Unauthorized"
+
+ // StatusReasonForbidden means the server can be reached and understood the request, but refuses
+ // to take any further action. It is the result of the server being configured to deny access for some reason
+ // to the requested resource by the client.
+ // Details (optional):
+ // "kind" string - the kind attribute of the forbidden resource
+ // on some operations may differ from the requested
+ // resource.
+ // "id" string - the identifier of the forbidden resource
+ // Status code 403
+ StatusReasonForbidden StatusReason = "Forbidden"
+
+ // StatusReasonNotFound means one or more resources required for this operation
+ // could not be found.
+ // Details (optional):
+ // "kind" string - the kind attribute of the missing resource
+ // on some operations may differ from the requested
+ // resource.
+ // "id" string - the identifier of the missing resource
+ // Status code 404
+ StatusReasonNotFound StatusReason = "NotFound"
+
+ // StatusReasonAlreadyExists means the resource you are creating already exists.
+ // Details (optional):
+ // "kind" string - the kind attribute of the conflicting resource
+ // "id" string - the identifier of the conflicting resource
+ // Status code 409
+ StatusReasonAlreadyExists StatusReason = "AlreadyExists"
+
+ // StatusReasonConflict means the requested operation cannot be completed
+ // due to a conflict in the operation. The client may need to alter the
+ // request. Each resource may define custom details that indicate the
+ // nature of the conflict.
+ // Status code 409
+ StatusReasonConflict StatusReason = "Conflict"
+
+ // StatusReasonGone means the item is no longer available at the server and no
+ // forwarding address is known.
+ // Status code 410
+ StatusReasonGone StatusReason = "Gone"
+
+ // StatusReasonInvalid means the requested create or update operation cannot be
+ // completed due to invalid data provided as part of the request. The client may
+ // need to alter the request. When set, the client may use the StatusDetails
+ // message field as a summary of the issues encountered.
+ // Details (optional):
+ // "kind" string - the kind attribute of the invalid resource
+ // "id" string - the identifier of the invalid resource
+ // "causes" - one or more StatusCause entries indicating the data in the
+ // provided resource that was invalid. The code, message, and
+ // field attributes will be set.
+ // Status code 422
+ StatusReasonInvalid StatusReason = "Invalid"
+
+ // StatusReasonServerTimeout means the server can be reached and understood the request,
+ // but cannot complete the action in a reasonable time. The client should retry the request.
+	// This may be due to temporary server load or a transient communication issue with
+ // another server. Status code 500 is used because the HTTP spec provides no suitable
+ // server-requested client retry and the 5xx class represents actionable errors.
+ // Details (optional):
+ // "kind" string - the kind attribute of the resource being acted on.
+ // "id" string - the operation that is being attempted.
+ // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+ // Status code 500
+ StatusReasonServerTimeout StatusReason = "ServerTimeout"
+
+ // StatusReasonTimeout means that the request could not be completed within the given time.
+ // Clients can get this response only when they specified a timeout param in the request,
+ // or if the server cannot complete the operation within a reasonable amount of time.
+ // The request might succeed with an increased value of timeout param. The client *should*
+ // wait at least the number of seconds specified by the retryAfterSeconds field.
+ // Details (optional):
+ // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+ // Status code 504
+ StatusReasonTimeout StatusReason = "Timeout"
+
+ // StatusReasonBadRequest means that the request itself was invalid, because the request
+	// doesn't make any sense, for example deleting a read-only object. This is different from
+	// StatusReasonInvalid above, which indicates that the API call could possibly succeed, but the
+ // data was invalid. API calls that return BadRequest can never succeed.
+ StatusReasonBadRequest StatusReason = "BadRequest"
+
+ // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the
+ // resource was not supported by the code - for instance, attempting to delete a resource that
+ // can only be created. API calls that return MethodNotAllowed can never succeed.
+ StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed"
+
+ // StatusReasonInternalError indicates that an internal error occurred, it is unexpected
+ // and the outcome of the call is unknown.
+ // Details (optional):
+ // "causes" - The original error
+ // Status code 500
+ StatusReasonInternalError StatusReason = "InternalError"
+
+ // StatusReasonExpired indicates that the request is invalid because the content you are requesting
+ // has expired and is no longer available. It is typically associated with watches that can't be
+ // serviced.
+ // Status code 410 (gone)
+ StatusReasonExpired StatusReason = "Expired"
+
+ // StatusReasonServiceUnavailable means that the request itself was valid,
+ // but the requested service is unavailable at this time.
+ // Retrying the request after some time might succeed.
+ // Status code 503
+ StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable"
+)
+
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+type StatusCause struct {
+ // A machine-readable description of the cause of the error. If this value is
+ // empty there is no information available.
+ Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"`
+ // A human-readable description of the cause of the error. This field may be
+ // presented as-is to a reader.
+ Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+ // The field of the resource that has caused this error, as named by its JSON
+ // serialization. May include dot and postfix notation for nested attributes.
+ // Arrays are zero-indexed. Fields may appear more than once in an array of
+ // causes due to fields having multiple errors.
+ // Optional.
+ //
+ // Examples:
+ // "name" - the field "name" on the current resource
+ // "items[0].name" - the field "name" on the first array entry in "items"
+ Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"`
+}
+
+// CauseType is a machine readable value providing more detail about what
+// occurred in a status response. An operation may have multiple causes for a
+// status (whether Failure or Success).
+type CauseType string
+
+const (
+ // CauseTypeFieldValueNotFound is used to report failure to find a requested value
+ // (e.g. looking up an ID).
+ CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound"
+ // CauseTypeFieldValueRequired is used to report required values that are not
+ // provided (e.g. empty strings, null values, or empty arrays).
+ CauseTypeFieldValueRequired CauseType = "FieldValueRequired"
+ // CauseTypeFieldValueDuplicate is used to report collisions of values that must be
+ // unique (e.g. unique IDs).
+ CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate"
+ // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex
+ // match).
+ CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid"
+ // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
+ // values that can not be handled (e.g. an enumerated string).
+ CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
+ // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client
+ // without the expected return type. The presence of this cause indicates the error may be
+ // due to an intervening proxy or the server software malfunctioning.
+ CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse"
+)
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type APIVersions struct {
+ TypeMeta `json:",inline"`
+ // versions are the api versions that are available.
+ Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"`
+ // a map of client CIDR to server address that is serving this group.
+ // This is to help clients reach servers in the most network-efficient way possible.
+ // Clients can use the appropriate server address as per the CIDR that they match.
+ // In case of multiple matches, clients should use the longest matching CIDR.
+ // The server returns only those CIDRs that it thinks that the client can match.
+ // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+ // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"`
+}
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+type APIGroupList struct {
+ TypeMeta `json:",inline"`
+ // groups is a list of APIGroup.
+ Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"`
+}
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+type APIGroup struct {
+ TypeMeta `json:",inline"`
+ // name is the name of the group.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // versions are the versions supported in this group.
+ Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"`
+ // preferredVersion is the version preferred by the API server, which
+ // probably is the storage version.
+ PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"`
+ // a map of client CIDR to server address that is serving this group.
+ // This is to help clients reach servers in the most network-efficient way possible.
+ // Clients can use the appropriate server address as per the CIDR that they match.
+ // In case of multiple matches, clients should use the longest matching CIDR.
+ // The server returns only those CIDRs that it thinks that the client can match.
+ // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+ // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+ ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"`
+}
+
+// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
+type ServerAddressByClientCIDR struct {
+ // The CIDR with which clients can match their IP to figure out the server address that they should use.
+ ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"`
+ // Address of this server, suitable for a client that matches the above CIDR.
+ // This can be a hostname, hostname:port, IP or IP:port.
+ ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"`
+}
+
+// GroupVersion contains the "group/version" and "version" string of a version.
+// It is made a struct to keep extensibility.
+type GroupVersionForDiscovery struct {
+ // groupVersion specifies the API group and version in the form "group/version"
+ GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
+ // version specifies the version in the form of "version". This is to save
+ // the clients the trouble of splitting the GroupVersion.
+ Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+}
+
+// APIResource specifies the name of a resource and whether it is namespaced.
+type APIResource struct {
+ // name is the name of the resource.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // namespaced indicates if a resource is namespaced or not.
+ Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"`
+ // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
+ Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"`
+}
+
+// APIResourceList is a list of APIResource, it is used to expose the name of the
+// resources supported in a specific group and version, and if the resource
+// is namespaced.
+type APIResourceList struct {
+ TypeMeta `json:",inline"`
+ // groupVersion is the group and version this APIResourceList is for.
+ GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
+ // resources contains the name of the resources and if they are namespaced.
+ APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"`
+}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+type RootPaths struct {
+ // paths are the paths available at root.
+ Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"`
+}
+
+// TODO: remove me when watch is refactored
+func LabelSelectorQueryParam(version string) string {
+ return "labelSelector"
+}
+
+// TODO: remove me when watch is refactored
+func FieldSelectorQueryParam(version string) string {
+ return "fieldSelector"
+}
+
+// String returns available api versions as a human-friendly version string.
+func (apiVersions APIVersions) String() string {
+ return strings.Join(apiVersions.Versions, ",")
+}
+
+func (apiVersions APIVersions) GoString() string {
+ return apiVersions.String()
+}
+
+// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+type Patch struct{}
+
+// Note:
+// There are two different styles of label selectors used in versioned types:
+// an older style which is represented as just a string in versioned types, and a
+// newer style that is structured. LabelSelector is an internal representation for the
+// latter style.
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+ // key is the label key that the selector applies to.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists and DoesNotExist.
+ Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+ LabelSelectorOpIn LabelSelectorOperator = "In"
+ LabelSelectorOpNotIn LabelSelectorOperator = "NotIn"
+ LabelSelectorOpExists LabelSelectorOperator = "Exists"
+ LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
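
Finally, an illustrative sketch of consuming the Status type above: the JSON payload is hand-written to resemble an apiserver failure response (the pod name and HTTP code are assumptions), and Reason compares directly against the StatusReason constants.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// An illustrative, hand-written Status payload of the kind an apiserver returns on failure.
	body := []byte(`{
		"kind": "Status",
		"apiVersion": "v1",
		"status": "Failure",
		"message": "pods \"web-0\" not found",
		"reason": "NotFound",
		"details": {"name": "web-0", "kind": "pods"},
		"code": 404
	}`)

	var st unversioned.Status
	if err := json.Unmarshal(body, &st); err != nil {
		panic(err)
	}

	// Reason is a typed string, so it compares directly against the StatusReason constants.
	if st.Reason == unversioned.StatusReasonNotFound {
		fmt.Printf("%s/%s missing (HTTP %d)\n", st.Details.Kind, st.Details.Name, st.Code)
	}
}
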
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go
new file mode 100644
index 0000000..cdd31eb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_APIGroup = map[string]string{
+ "": "APIGroup contains the name, the supported versions, and the preferred version of a group.",
+ "name": "name is the name of the group.",
+ "versions": "versions are the versions supported in this group.",
+ "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.",
+ "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+}
+
+func (APIGroup) SwaggerDoc() map[string]string {
+ return map_APIGroup
+}
+
+var map_APIGroupList = map[string]string{
+ "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
+ "groups": "groups is a list of APIGroup.",
+}
+
+func (APIGroupList) SwaggerDoc() map[string]string {
+ return map_APIGroupList
+}
+
+var map_APIResource = map[string]string{
+ "": "APIResource specifies the name of a resource and whether it is namespaced.",
+ "name": "name is the name of the resource.",
+ "namespaced": "namespaced indicates if a resource is namespaced or not.",
+ "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
+}
+
+func (APIResource) SwaggerDoc() map[string]string {
+ return map_APIResource
+}
+
+var map_APIResourceList = map[string]string{
+ "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
+ "groupVersion": "groupVersion is the group and version this APIResourceList is for.",
+ "resources": "resources contains the name of the resources and if they are namespaced.",
+}
+
+func (APIResourceList) SwaggerDoc() map[string]string {
+ return map_APIResourceList
+}
+
+var map_APIVersions = map[string]string{
+ "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.",
+ "versions": "versions are the api versions that are available.",
+ "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+}
+
+func (APIVersions) SwaggerDoc() map[string]string {
+ return map_APIVersions
+}
+
+var map_ExportOptions = map[string]string{
+ "": "ExportOptions is the query options to the standard REST get call.",
+ "export": "Should this value be exported. Export strips fields that a user can not specify.`",
+ "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'",
+}
+
+func (ExportOptions) SwaggerDoc() map[string]string {
+ return map_ExportOptions
+}
+
+var map_GroupVersionForDiscovery = map[string]string{
+ "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
+ "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"",
+ "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
+}
+
+func (GroupVersionForDiscovery) SwaggerDoc() map[string]string {
+ return map_GroupVersionForDiscovery
+}
+
+var map_LabelSelector = map[string]string{
+ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+ "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+ "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+ return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+ "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "key is the label key that the selector applies to.",
+ "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.",
+ "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_LabelSelectorRequirement
+}
+
+var map_ListMeta = map[string]string{
+ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
+ "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.",
+ "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency",
+}
+
+func (ListMeta) SwaggerDoc() map[string]string {
+ return map_ListMeta
+}
+
+var map_Patch = map[string]string{
+ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
+}
+
+func (Patch) SwaggerDoc() map[string]string {
+ return map_Patch
+}
+
+var map_RootPaths = map[string]string{
+ "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".",
+ "paths": "paths are the paths available at root.",
+}
+
+func (RootPaths) SwaggerDoc() map[string]string {
+ return map_RootPaths
+}
+
+var map_ServerAddressByClientCIDR = map[string]string{
+ "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
+ "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.",
+ "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
+}
+
+func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string {
+ return map_ServerAddressByClientCIDR
+}
+
+var map_Status = map[string]string{
+ "": "Status is a return value for calls that don't return other objects.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "message": "A human-readable description of the status of this operation.",
+ "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
+ "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
+ "code": "Suggested HTTP return code for this status, 0 if not set.",
+}
+
+func (Status) SwaggerDoc() map[string]string {
+ return map_Status
+}
+
+var map_StatusCause = map[string]string{
+ "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
+ "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
+ "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.",
+ "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
+}
+
+func (StatusCause) SwaggerDoc() map[string]string {
+ return map_StatusCause
+}
+
+var map_StatusDetails = map[string]string{
+ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
+ "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
+ "group": "The group attribute of the resource associated with the status StatusReason.",
+ "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
+ "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.",
+}
+
+func (StatusDetails) SwaggerDoc() map[string]string {
+ return map_StatusDetails
+}
+
+var map_TypeMeta = map[string]string{
+ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
+ "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources",
+}
+
+func (TypeMeta) SwaggerDoc() map[string]string {
+ return map_TypeMeta
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
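
The generated SwaggerDoc methods above are plain lookup tables keyed by JSON field name. A hypothetical snippet (not part of the diff) showing how a documentation tool might read them:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	docs := unversioned.ListMeta{}.SwaggerDoc()
	fmt.Println(docs[""])                // type-level description
	fmt.Println(docs["resourceVersion"]) // per-field description
}
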
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go
new file mode 100644
index 0000000..ecb968b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/util/validation"
+ "k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if ps == nil {
+ return allErrs
+ }
+ allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
+ for i, expr := range ps.MatchExpressions {
+ allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...)
+ }
+ return allErrs
+}
+
+func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ switch sr.Operator {
+ case unversioned.LabelSelectorOpIn, unversioned.LabelSelectorOpNotIn:
+ if len(sr.Values) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+ }
+ case unversioned.LabelSelectorOpExists, unversioned.LabelSelectorOpDoesNotExist:
+ if len(sr.Values) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+ }
+ default:
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
+ }
+ allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
+ return allErrs
+}
+
+// ValidateLabelName validates that the label name is correctly defined.
+func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(labelName) {
+ allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
+ }
+ return allErrs
+}
+
+// ValidateLabels validates that a set of labels are correctly defined.
+func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for k, v := range labels {
+ allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
+ for _, msg := range validation.IsValidLabelValue(v) {
+ allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+ }
+ }
+ return allErrs
+}
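
A minimal usage sketch for the validators above (hypothetical example code, assuming the vendored import paths from this diff; the label keys and values are invented):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation"
	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	// An Exists requirement must not carry values, so this selector is invalid
	// and ValidateLabelSelector reports a Forbidden error for "values".
	selector := &unversioned.LabelSelector{
		MatchLabels: map[string]string{"app": "msb"},
		MatchExpressions: []unversioned.LabelSelectorRequirement{
			{Key: "tier", Operator: unversioned.LabelSelectorOpExists, Values: []string{"backend"}},
		},
	}

	errs := unversionedvalidation.ValidateLabelSelector(selector, field.NewPath("spec", "selector"))
	for _, e := range errs {
		fmt.Println(e.Error())
	}
}
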
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go
new file mode 100644
index 0000000..318c6ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+const (
+ // If you add a new topology domain here, also consider adding it to the set of default values
+ // for the scheduler's --failure-domain command-line argument.
+ LabelHostname = "kubernetes.io/hostname"
+ LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
+ LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
+
+ LabelInstanceType = "beta.kubernetes.io/instance-type"
+
+ LabelOS = "beta.kubernetes.io/os"
+ LabelArch = "beta.kubernetes.io/arch"
+)
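
These constants are typically used as keys when reading or writing node labels. A small hypothetical sketch (the label values are made up for illustration):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// Labels a cloud provider or kubelet might attach to a node, keyed by the
	// well-known constants above.
	nodeLabels := map[string]string{
		unversioned.LabelHostname:          "worker-0",
		unversioned.LabelZoneFailureDomain: "zone-a",
		unversioned.LabelZoneRegion:        "region-1",
		unversioned.LabelInstanceType:      "m4.large",
		unversioned.LabelOS:                "linux",
		unversioned.LabelArch:              "amd64",
	}
	fmt.Println(nodeLabels)
}
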
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go
new file mode 100644
index 0000000..fea2f17
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced
+// with a GroupAndVersion type.
+package util
+
+import "strings"
+
+func GetVersion(groupVersion string) string {
+ s := strings.Split(groupVersion, "/")
+ if len(s) != 2 {
+ // e.g. return "v1" for groupVersion="v1"
+ return s[len(s)-1]
+ }
+ return s[1]
+}
+
+func GetGroup(groupVersion string) string {
+ s := strings.Split(groupVersion, "/")
+ if len(s) == 1 {
+ // e.g. return "" for groupVersion="v1"
+ return ""
+ }
+ return s[0]
+}
+
+// GetGroupVersion returns the "group/version". It returns "version" if group
+// is empty. It returns "group/" if version is empty.
+func GetGroupVersion(group, version string) string {
+ if len(group) == 0 {
+ return version
+ }
+ return group + "/" + version
+}
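
A short usage sketch for these helpers (hypothetical example; the group/version strings are just sample inputs):

package main

import (
	"fmt"

	apiutil "k8s.io/kubernetes/pkg/api/util"
)

func main() {
	fmt.Println(apiutil.GetGroup("extensions/v1beta1"))   // "extensions"
	fmt.Println(apiutil.GetVersion("extensions/v1beta1")) // "v1beta1"
	fmt.Println(apiutil.GetGroup("v1"))                   // "" (legacy core group)
	fmt.Println(apiutil.GetVersion("v1"))                 // "v1"
	fmt.Println(apiutil.GetGroupVersion("", "v1"))        // "v1"
	fmt.Println(apiutil.GetGroupVersion("extensions", "v1beta1")) // "extensions/v1beta1"
}
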
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go
new file mode 100644
index 0000000..642b1bc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go
@@ -0,0 +1,579 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+const (
+ // Annotation key used to identify mirror pods.
+ mirrorAnnotationKey = "kubernetes.io/config.mirror"
+
+ // Value used to identify mirror pods from pre-v1.1 kubelet.
+ mirrorAnnotationValue_1_0 = "mirror"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions
+ err := scheme.AddConversionFuncs(
+ Convert_api_Pod_To_v1_Pod,
+ Convert_api_PodSpec_To_v1_PodSpec,
+ Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
+ Convert_api_ServiceSpec_To_v1_ServiceSpec,
+ Convert_v1_Pod_To_api_Pod,
+ Convert_v1_PodSpec_To_api_PodSpec,
+ Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec,
+ Convert_v1_Secret_To_api_Secret,
+ Convert_v1_ServiceSpec_To_api_ServiceSpec,
+ Convert_v1_ResourceList_To_api_ResourceList,
+ )
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+
+ // Add field label conversions for kinds whose only selectable fields are ObjectMeta fields.
+ for _, kind := range []string{
+ "Endpoints",
+ "ResourceQuota",
+ "PersistentVolumeClaim",
+ "Service",
+ "ServiceAccount",
+ "ConfigMap",
+ } {
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", kind,
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.namespace",
+ "metadata.name":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label %q not supported for %q", label, kind)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ }
+
+ // Add field conversion funcs.
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "Pod",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name",
+ "metadata.namespace",
+ "metadata.labels",
+ "metadata.annotations",
+ "status.phase",
+ "status.podIP",
+ "spec.nodeName",
+ "spec.restartPolicy":
+ return label, value, nil
+ // This is for backwards compatibility with old v1 clients which send spec.host
+ case "spec.host":
+ return "spec.nodeName", value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "Node",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name":
+ return label, value, nil
+ case "spec.unschedulable":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "ReplicationController",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name",
+ "metadata.namespace",
+ "status.replicas":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "Event",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "involvedObject.kind",
+ "involvedObject.namespace",
+ "involvedObject.name",
+ "involvedObject.uid",
+ "involvedObject.apiVersion",
+ "involvedObject.resourceVersion",
+ "involvedObject.fieldPath",
+ "reason",
+ "source",
+ "type",
+ "metadata.namespace",
+ "metadata.name":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "Namespace",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "status.phase",
+ "metadata.name":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ err = api.Scheme.AddFieldLabelConversionFunc("v1", "Secret",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "type",
+ "metadata.namespace",
+ "metadata.name":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
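
The field label conversions registered above are easiest to see with a small standalone sketch of the Pod mapping (a hypothetical, abbreviated mirror of the registered closure, not the vendored code itself): supported labels pass through unchanged, and the legacy spec.host selector is rewritten to spec.nodeName.

package main

import "fmt"

// convertPodFieldLabel mirrors (in abbreviated form) the v1 Pod field-label
// conversion registered above.
func convertPodFieldLabel(label, value string) (string, string, error) {
	switch label {
	case "metadata.name", "metadata.namespace", "status.phase",
		"status.podIP", "spec.nodeName", "spec.restartPolicy":
		return label, value, nil
	case "spec.host": // old v1 clients still send spec.host
		return "spec.nodeName", value, nil
	default:
		return "", "", fmt.Errorf("field label not supported: %s", label)
	}
}

func main() {
	label, value, _ := convertPodFieldLabel("spec.host", "node-1")
	fmt.Println(label, value) // spec.nodeName node-1
}
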
+
+func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error {
+ out.Replicas = &in.Replicas
+ out.Selector = in.Selector
+ //if in.TemplateRef != nil {
+ // out.TemplateRef = new(ObjectReference)
+ // if err := Convert_api_ObjectReference_To_v1_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil {
+ // return err
+ // }
+ //} else {
+ // out.TemplateRef = nil
+ //}
+ if in.Template != nil {
+ out.Template = new(PodTemplateSpec)
+ if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
+ return err
+ }
+ } else {
+ out.Template = nil
+ }
+ return nil
+}
+
+func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error {
+ out.Replicas = *in.Replicas
+ out.Selector = in.Selector
+
+ //if in.TemplateRef != nil {
+ // out.TemplateRef = new(api.ObjectReference)
+ // if err := Convert_v1_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil {
+ // return err
+ // }
+ //} else {
+ // out.TemplateRef = nil
+ //}
+ if in.Template != nil {
+ out.Template = new(api.PodTemplateSpec)
+ if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil {
+ return err
+ }
+ } else {
+ out.Template = nil
+ }
+ return nil
+}
+
+func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
+ if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil {
+ return err
+ }
+
+ if old := out.Annotations; old != nil {
+ out.Annotations = make(map[string]string, len(old))
+ for k, v := range old {
+ out.Annotations[k] = v
+ }
+ }
+ if len(out.Status.InitContainerStatuses) > 0 {
+ if out.Annotations == nil {
+ out.Annotations = make(map[string]string)
+ }
+ value, err := json.Marshal(out.Status.InitContainerStatuses)
+ if err != nil {
+ return err
+ }
+ out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value)
+ } else {
+ delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
+ }
+ return nil
+}
+
+func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error {
+ // TODO: when we move init container to beta, remove these conversions
+ if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok {
+ var values []ContainerStatus
+ if err := json.Unmarshal([]byte(value), &values); err != nil {
+ return err
+ }
+ // Conversion from external to internal version exists more to
+ // satisfy the needs of the decoder than it does to be a general
+ // purpose tool. And Decode always creates an intermediate object
+ // to decode to. Thus the caller of UnsafeConvertToVersion is
+ // taking responsibility to ensure mutation of in is not exposed
+ // back to the caller.
+ in.Status.InitContainerStatuses = values
+ }
+
+ if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil {
+ return err
+ }
+ if len(out.Annotations) > 0 {
+ old := out.Annotations
+ out.Annotations = make(map[string]string, len(old))
+ for k, v := range old {
+ out.Annotations[k] = v
+ }
+ delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
+ }
+ return nil
+}
+
+func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
+ if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
+ return err
+ }
+
+ // TODO: when we move init container to beta, remove these conversions
+ if old := out.Annotations; old != nil {
+ out.Annotations = make(map[string]string, len(old))
+ for k, v := range old {
+ out.Annotations[k] = v
+ }
+ }
+ if len(out.Spec.InitContainers) > 0 {
+ if out.Annotations == nil {
+ out.Annotations = make(map[string]string)
+ }
+ value, err := json.Marshal(out.Spec.InitContainers)
+ if err != nil {
+ return err
+ }
+ out.Annotations[PodInitContainersAnnotationKey] = string(value)
+ } else {
+ delete(out.Annotations, PodInitContainersAnnotationKey)
+ }
+ return nil
+}
+
+func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
+ // TODO: when we move init container to beta, remove these conversions
+ if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok {
+ var values []Container
+ if err := json.Unmarshal([]byte(value), &values); err != nil {
+ return err
+ }
+ // Conversion from external to internal version exists more to
+ // satisfy the needs of the decoder than it does to be a general
+ // purpose tool. And Decode always creates an intermediate object
+ // to decode to. Thus the caller of UnsafeConvertToVersion is
+ // taking responsibility to ensure mutation of in is not exposed
+ // back to the caller.
+ in.Spec.InitContainers = values
+ }
+
+ if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil {
+ return err
+ }
+ if len(out.Annotations) > 0 {
+ old := out.Annotations
+ out.Annotations = make(map[string]string, len(old))
+ for k, v := range old {
+ out.Annotations[k] = v
+ }
+ delete(out.Annotations, PodInitContainersAnnotationKey)
+ }
+ return nil
+}
+
+// The following two PodSpec conversions are done here to support ServiceAccount
+// as an alias for ServiceAccountName.
+func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error {
+ if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil {
+ return err
+ }
+
+ // DeprecatedServiceAccount is an alias for ServiceAccountName.
+ out.DeprecatedServiceAccount = in.ServiceAccountName
+
+ if in.SecurityContext != nil {
+ // the host namespace fields have to be handled here for backward compatibility
+ // with v1.0.0
+ out.HostPID = in.SecurityContext.HostPID
+ out.HostNetwork = in.SecurityContext.HostNetwork
+ out.HostIPC = in.SecurityContext.HostIPC
+ }
+
+ return nil
+}
+
+func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error {
+ if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil {
+ return err
+ }
+
+ // We support DeprecatedServiceAccount as an alias for ServiceAccountName.
+ // If both are specified, ServiceAccountName (the new field) wins.
+ if in.ServiceAccountName == "" {
+ out.ServiceAccountName = in.DeprecatedServiceAccount
+ }
+
+ // the host namespace fields have to be handled specially for backward compatibility
+ // with v1.0.0
+ if out.SecurityContext == nil {
+ out.SecurityContext = new(api.PodSecurityContext)
+ }
+ out.SecurityContext.HostNetwork = in.HostNetwork
+ out.SecurityContext.HostPID = in.HostPID
+ out.SecurityContext.HostIPC = in.HostIPC
+
+ return nil
+}
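
The ServiceAccount alias handling above boils down to a simple precedence rule. A hypothetical sketch of just that rule (a made-up helper, not the vendored converter):

package main

import "fmt"

// effectiveServiceAccountName mirrors the precedence applied during the
// v1 -> api PodSpec conversion: the new serviceAccountName field wins, and
// the deprecated serviceAccount field is only used as a fallback.
func effectiveServiceAccountName(serviceAccountName, deprecatedServiceAccount string) string {
	if serviceAccountName != "" {
		return serviceAccountName
	}
	return deprecatedServiceAccount
}

func main() {
	fmt.Println(effectiveServiceAccountName("builder", "legacy")) // builder
	fmt.Println(effectiveServiceAccountName("", "legacy"))        // legacy
}
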
+
+func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error {
+ if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil {
+ return err
+ }
+
+ // TODO: when we move init container to beta, remove these conversions
+ if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 {
+ old := out.Annotations
+ out.Annotations = make(map[string]string, len(old))
+ for k, v := range old {
+ out.Annotations[k] = v
+ }
+ delete(out.Annotations, PodInitContainersAnnotationKey)
+ delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
+ }
+ if len(out.Spec.InitContainers) > 0 {
+ value, err := json.Marshal(out.Spec.InitContainers)
+ if err != nil {
+ return err
+ }
+ out.Annotations[PodInitContainersAnnotationKey] = string(value)
+ }
+ if len(out.Status.InitContainerStatuses) > 0 {
+ value, err := json.Marshal(out.Status.InitContainerStatuses)
+ if err != nil {
+ return err
+ }
+ out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value)
+ }
+
+ // We need to reset certain fields for mirror pods from pre-v1.1 kubelet
+ // (#15960).
+ // TODO: Remove this code after we drop support for v1.0 kubelets.
+ if value, ok := in.Annotations[mirrorAnnotationKey]; ok && value == mirrorAnnotationValue_1_0 {
+ // Reset the TerminationGracePeriodSeconds.
+ out.Spec.TerminationGracePeriodSeconds = nil
+ // Reset the resource requests.
+ for i := range out.Spec.Containers {
+ out.Spec.Containers[i].Resources.Requests = nil
+ }
+ }
+ return nil
+}
+
+func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error {
+ // TODO: when we move init container to beta, remove these conversions
+ if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok {
+ var values []Container
+ if err := json.Unmarshal([]byte(value), &values); err != nil {
+ return err
+ }
+ // Conversion from external to internal version exists more to
+ // satisfy the needs of the decoder than it does to be a general
+ // purpose tool. And Decode always creates an intermediate object
+ // to decode to. Thus the caller of UnsafeConvertToVersion is
+ // taking responsibility to ensure mutation of in is not exposed
+ // back to the caller.
+ in.Spec.InitContainers = values
+ }
+ if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok {
+ var values []ContainerStatus
+ if err := json.Unmarshal([]byte(value), &values); err != nil {
+ return err
+ }
+ // Conversion from external to internal version exists more to
+ // satisfy the needs of the decoder than it does to be a general
+ // purpose tool. And Decode always creates an intermediate object
+ // to decode to. Thus the caller of UnsafeConvertToVersion is
+ // taking responsibility to ensure mutation of in is not exposed
+ // back to the caller.
+ in.Status.InitContainerStatuses = values
+ }
+
+ if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil {
+ return err
+ }
+ if len(out.Annotations) > 0 {
+ old := out.Annotations
+ out.Annotations = make(map[string]string, len(old))
+ for k, v := range old {
+ out.Annotations[k] = v
+ }
+ delete(out.Annotations, PodInitContainersAnnotationKey)
+ delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
+ }
+ return nil
+}
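
While init containers are alpha, the converters above round-trip them through a JSON-encoded pod annotation. A hypothetical sketch of the encoding half (the container name and image are made up):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	initContainers := []v1.Container{
		{Name: "init-config", Image: "busybox"},
	}

	// The api -> v1 conversion marshals the slice and stores it under the
	// init-containers annotation key, as Convert_api_Pod_To_v1_Pod does above;
	// the reverse conversion unmarshals it back into Spec.InitContainers.
	value, err := json.Marshal(initContainers)
	if err != nil {
		panic(err)
	}

	annotations := map[string]string{
		v1.PodInitContainersAnnotationKey: string(value),
	}
	fmt.Println(annotations)
}
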
+
+func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
+ if err := autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s); err != nil {
+ return err
+ }
+ // Publish both externalIPs and deprecatedPublicIPs fields in v1.
+ out.DeprecatedPublicIPs = in.ExternalIPs
+ return nil
+}
+
+func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error {
+ if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil {
+ return err
+ }
+
+ // StringData overwrites Data
+ if len(in.StringData) > 0 {
+ if out.Data == nil {
+ out.Data = map[string][]byte{}
+ }
+ for k, v := range in.StringData {
+ out.Data[k] = []byte(v)
+ }
+ }
+
+ return nil
+}
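
The StringData handling above is a plain overwrite-merge into Data. A hypothetical standalone sketch of the rule (a made-up helper, not the vendored converter):

package main

import "fmt"

// mergeStringData mirrors the Secret conversion above: entries from
// stringData are byte-encoded and overwrite any same-named keys in data.
func mergeStringData(data map[string][]byte, stringData map[string]string) map[string][]byte {
	if len(stringData) == 0 {
		return data
	}
	if data == nil {
		data = map[string][]byte{}
	}
	for k, v := range stringData {
		data[k] = []byte(v)
	}
	return data
}

func main() {
	out := mergeStringData(
		map[string][]byte{"password": []byte("old")},
		map[string]string{"password": "new", "token": "abc"},
	)
	fmt.Println(string(out["password"]), string(out["token"])) // new abc
}
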
+
+func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error {
+ if err := autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s); err != nil {
+ return err
+ }
+ // Prefer the legacy deprecatedPublicIPs field, if provided.
+ if len(in.DeprecatedPublicIPs) > 0 {
+ out.ExternalIPs = in.DeprecatedPublicIPs
+ }
+ return nil
+}
+
+func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error {
+ out.SupplementalGroups = in.SupplementalGroups
+ if in.SELinuxOptions != nil {
+ out.SELinuxOptions = new(SELinuxOptions)
+ if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ out.RunAsUser = in.RunAsUser
+ out.RunAsNonRoot = in.RunAsNonRoot
+ out.FSGroup = in.FSGroup
+ return nil
+}
+
+func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error {
+ out.SupplementalGroups = in.SupplementalGroups
+ if in.SELinuxOptions != nil {
+ out.SELinuxOptions = new(api.SELinuxOptions)
+ if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ out.RunAsUser = in.RunAsUser
+ out.RunAsNonRoot = in.RunAsNonRoot
+ out.FSGroup = in.FSGroup
+ return nil
+}
+
+func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error {
+ if *in == nil {
+ return nil
+ }
+
+ if *out == nil {
+ *out = make(api.ResourceList, len(*in))
+ }
+ for key, val := range *in {
+ // TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
+ // In the future, we should instead reject values that need rounding.
+ const milliScale = -3
+ val.RoundUp(milliScale)
+
+ (*out)[api.ResourceName(key)] = val
+ }
+ return nil
+}
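
The milli-scale round-up above can be observed directly on a resource.Quantity. A hypothetical sketch (the exact string rendering depends on the vendored resource package):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// 1234567 nanocores is finer than milli scale, so the conversion
	// rounds it up to the next whole milli value.
	q := resource.MustParse("1234567n")
	q.RoundUp(-3) // milli scale, matching the converter above
	fmt.Println(q.String())
}
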
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go
new file mode 100644
index 0000000..a9cb349
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go
@@ -0,0 +1,6820 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ resource "k8s.io/kubernetes/pkg/api/resource"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ runtime "k8s.io/kubernetes/pkg/runtime"
+ types "k8s.io/kubernetes/pkg/types"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
+ Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
+ Convert_v1_Affinity_To_api_Affinity,
+ Convert_api_Affinity_To_v1_Affinity,
+ Convert_v1_AttachedVolume_To_api_AttachedVolume,
+ Convert_api_AttachedVolume_To_v1_AttachedVolume,
+ Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource,
+ Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource,
+ Convert_v1_Binding_To_api_Binding,
+ Convert_api_Binding_To_v1_Binding,
+ Convert_v1_Capabilities_To_api_Capabilities,
+ Convert_api_Capabilities_To_v1_Capabilities,
+ Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource,
+ Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource,
+ Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource,
+ Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource,
+ Convert_v1_ComponentCondition_To_api_ComponentCondition,
+ Convert_api_ComponentCondition_To_v1_ComponentCondition,
+ Convert_v1_ComponentStatus_To_api_ComponentStatus,
+ Convert_api_ComponentStatus_To_v1_ComponentStatus,
+ Convert_v1_ComponentStatusList_To_api_ComponentStatusList,
+ Convert_api_ComponentStatusList_To_v1_ComponentStatusList,
+ Convert_v1_ConfigMap_To_api_ConfigMap,
+ Convert_api_ConfigMap_To_v1_ConfigMap,
+ Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector,
+ Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector,
+ Convert_v1_ConfigMapList_To_api_ConfigMapList,
+ Convert_api_ConfigMapList_To_v1_ConfigMapList,
+ Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource,
+ Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource,
+ Convert_v1_Container_To_api_Container,
+ Convert_api_Container_To_v1_Container,
+ Convert_v1_ContainerImage_To_api_ContainerImage,
+ Convert_api_ContainerImage_To_v1_ContainerImage,
+ Convert_v1_ContainerPort_To_api_ContainerPort,
+ Convert_api_ContainerPort_To_v1_ContainerPort,
+ Convert_v1_ContainerState_To_api_ContainerState,
+ Convert_api_ContainerState_To_v1_ContainerState,
+ Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning,
+ Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning,
+ Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated,
+ Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated,
+ Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting,
+ Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting,
+ Convert_v1_ContainerStatus_To_api_ContainerStatus,
+ Convert_api_ContainerStatus_To_v1_ContainerStatus,
+ Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint,
+ Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint,
+ Convert_v1_DeleteOptions_To_api_DeleteOptions,
+ Convert_api_DeleteOptions_To_v1_DeleteOptions,
+ Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile,
+ Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile,
+ Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource,
+ Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource,
+ Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource,
+ Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource,
+ Convert_v1_EndpointAddress_To_api_EndpointAddress,
+ Convert_api_EndpointAddress_To_v1_EndpointAddress,
+ Convert_v1_EndpointPort_To_api_EndpointPort,
+ Convert_api_EndpointPort_To_v1_EndpointPort,
+ Convert_v1_EndpointSubset_To_api_EndpointSubset,
+ Convert_api_EndpointSubset_To_v1_EndpointSubset,
+ Convert_v1_Endpoints_To_api_Endpoints,
+ Convert_api_Endpoints_To_v1_Endpoints,
+ Convert_v1_EndpointsList_To_api_EndpointsList,
+ Convert_api_EndpointsList_To_v1_EndpointsList,
+ Convert_v1_EnvVar_To_api_EnvVar,
+ Convert_api_EnvVar_To_v1_EnvVar,
+ Convert_v1_EnvVarSource_To_api_EnvVarSource,
+ Convert_api_EnvVarSource_To_v1_EnvVarSource,
+ Convert_v1_Event_To_api_Event,
+ Convert_api_Event_To_v1_Event,
+ Convert_v1_EventList_To_api_EventList,
+ Convert_api_EventList_To_v1_EventList,
+ Convert_v1_EventSource_To_api_EventSource,
+ Convert_api_EventSource_To_v1_EventSource,
+ Convert_v1_ExecAction_To_api_ExecAction,
+ Convert_api_ExecAction_To_v1_ExecAction,
+ Convert_v1_ExportOptions_To_api_ExportOptions,
+ Convert_api_ExportOptions_To_v1_ExportOptions,
+ Convert_v1_FCVolumeSource_To_api_FCVolumeSource,
+ Convert_api_FCVolumeSource_To_v1_FCVolumeSource,
+ Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource,
+ Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource,
+ Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource,
+ Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource,
+ Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource,
+ Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource,
+ Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource,
+ Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource,
+ Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource,
+ Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource,
+ Convert_v1_HTTPGetAction_To_api_HTTPGetAction,
+ Convert_api_HTTPGetAction_To_v1_HTTPGetAction,
+ Convert_v1_HTTPHeader_To_api_HTTPHeader,
+ Convert_api_HTTPHeader_To_v1_HTTPHeader,
+ Convert_v1_Handler_To_api_Handler,
+ Convert_api_Handler_To_v1_Handler,
+ Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource,
+ Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource,
+ Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource,
+ Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource,
+ Convert_v1_KeyToPath_To_api_KeyToPath,
+ Convert_api_KeyToPath_To_v1_KeyToPath,
+ Convert_v1_Lifecycle_To_api_Lifecycle,
+ Convert_api_Lifecycle_To_v1_Lifecycle,
+ Convert_v1_LimitRange_To_api_LimitRange,
+ Convert_api_LimitRange_To_v1_LimitRange,
+ Convert_v1_LimitRangeItem_To_api_LimitRangeItem,
+ Convert_api_LimitRangeItem_To_v1_LimitRangeItem,
+ Convert_v1_LimitRangeList_To_api_LimitRangeList,
+ Convert_api_LimitRangeList_To_v1_LimitRangeList,
+ Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec,
+ Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec,
+ Convert_v1_List_To_api_List,
+ Convert_api_List_To_v1_List,
+ Convert_v1_ListOptions_To_api_ListOptions,
+ Convert_api_ListOptions_To_v1_ListOptions,
+ Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress,
+ Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress,
+ Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus,
+ Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus,
+ Convert_v1_LocalObjectReference_To_api_LocalObjectReference,
+ Convert_api_LocalObjectReference_To_v1_LocalObjectReference,
+ Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource,
+ Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource,
+ Convert_v1_Namespace_To_api_Namespace,
+ Convert_api_Namespace_To_v1_Namespace,
+ Convert_v1_NamespaceList_To_api_NamespaceList,
+ Convert_api_NamespaceList_To_v1_NamespaceList,
+ Convert_v1_NamespaceSpec_To_api_NamespaceSpec,
+ Convert_api_NamespaceSpec_To_v1_NamespaceSpec,
+ Convert_v1_NamespaceStatus_To_api_NamespaceStatus,
+ Convert_api_NamespaceStatus_To_v1_NamespaceStatus,
+ Convert_v1_Node_To_api_Node,
+ Convert_api_Node_To_v1_Node,
+ Convert_v1_NodeAddress_To_api_NodeAddress,
+ Convert_api_NodeAddress_To_v1_NodeAddress,
+ Convert_v1_NodeAffinity_To_api_NodeAffinity,
+ Convert_api_NodeAffinity_To_v1_NodeAffinity,
+ Convert_v1_NodeCondition_To_api_NodeCondition,
+ Convert_api_NodeCondition_To_v1_NodeCondition,
+ Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints,
+ Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints,
+ Convert_v1_NodeList_To_api_NodeList,
+ Convert_api_NodeList_To_v1_NodeList,
+ Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions,
+ Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions,
+ Convert_v1_NodeSelector_To_api_NodeSelector,
+ Convert_api_NodeSelector_To_v1_NodeSelector,
+ Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement,
+ Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement,
+ Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm,
+ Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm,
+ Convert_v1_NodeSpec_To_api_NodeSpec,
+ Convert_api_NodeSpec_To_v1_NodeSpec,
+ Convert_v1_NodeStatus_To_api_NodeStatus,
+ Convert_api_NodeStatus_To_v1_NodeStatus,
+ Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo,
+ Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo,
+ Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector,
+ Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector,
+ Convert_v1_ObjectMeta_To_api_ObjectMeta,
+ Convert_api_ObjectMeta_To_v1_ObjectMeta,
+ Convert_v1_ObjectReference_To_api_ObjectReference,
+ Convert_api_ObjectReference_To_v1_ObjectReference,
+ Convert_v1_OwnerReference_To_api_OwnerReference,
+ Convert_api_OwnerReference_To_v1_OwnerReference,
+ Convert_v1_PersistentVolume_To_api_PersistentVolume,
+ Convert_api_PersistentVolume_To_v1_PersistentVolume,
+ Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim,
+ Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim,
+ Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList,
+ Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList,
+ Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec,
+ Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec,
+ Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus,
+ Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus,
+ Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource,
+ Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource,
+ Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList,
+ Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList,
+ Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource,
+ Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource,
+ Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec,
+ Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec,
+ Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus,
+ Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus,
+ Convert_v1_Pod_To_api_Pod,
+ Convert_api_Pod_To_v1_Pod,
+ Convert_v1_PodAffinity_To_api_PodAffinity,
+ Convert_api_PodAffinity_To_v1_PodAffinity,
+ Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm,
+ Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm,
+ Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity,
+ Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity,
+ Convert_v1_PodAttachOptions_To_api_PodAttachOptions,
+ Convert_api_PodAttachOptions_To_v1_PodAttachOptions,
+ Convert_v1_PodCondition_To_api_PodCondition,
+ Convert_api_PodCondition_To_v1_PodCondition,
+ Convert_v1_PodExecOptions_To_api_PodExecOptions,
+ Convert_api_PodExecOptions_To_v1_PodExecOptions,
+ Convert_v1_PodList_To_api_PodList,
+ Convert_api_PodList_To_v1_PodList,
+ Convert_v1_PodLogOptions_To_api_PodLogOptions,
+ Convert_api_PodLogOptions_To_v1_PodLogOptions,
+ Convert_v1_PodProxyOptions_To_api_PodProxyOptions,
+ Convert_api_PodProxyOptions_To_v1_PodProxyOptions,
+ Convert_v1_PodSecurityContext_To_api_PodSecurityContext,
+ Convert_api_PodSecurityContext_To_v1_PodSecurityContext,
+ Convert_v1_PodSpec_To_api_PodSpec,
+ Convert_api_PodSpec_To_v1_PodSpec,
+ Convert_v1_PodStatus_To_api_PodStatus,
+ Convert_api_PodStatus_To_v1_PodStatus,
+ Convert_v1_PodStatusResult_To_api_PodStatusResult,
+ Convert_api_PodStatusResult_To_v1_PodStatusResult,
+ Convert_v1_PodTemplate_To_api_PodTemplate,
+ Convert_api_PodTemplate_To_v1_PodTemplate,
+ Convert_v1_PodTemplateList_To_api_PodTemplateList,
+ Convert_api_PodTemplateList_To_v1_PodTemplateList,
+ Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec,
+ Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec,
+ Convert_v1_Preconditions_To_api_Preconditions,
+ Convert_api_Preconditions_To_v1_Preconditions,
+ Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm,
+ Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm,
+ Convert_v1_Probe_To_api_Probe,
+ Convert_api_Probe_To_v1_Probe,
+ Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource,
+ Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource,
+ Convert_v1_RangeAllocation_To_api_RangeAllocation,
+ Convert_api_RangeAllocation_To_v1_RangeAllocation,
+ Convert_v1_ReplicationController_To_api_ReplicationController,
+ Convert_api_ReplicationController_To_v1_ReplicationController,
+ Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList,
+ Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList,
+ Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec,
+ Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
+ Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus,
+ Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus,
+ Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector,
+ Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector,
+ Convert_v1_ResourceQuota_To_api_ResourceQuota,
+ Convert_api_ResourceQuota_To_v1_ResourceQuota,
+ Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList,
+ Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList,
+ Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec,
+ Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec,
+ Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus,
+ Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus,
+ Convert_v1_ResourceRequirements_To_api_ResourceRequirements,
+ Convert_api_ResourceRequirements_To_v1_ResourceRequirements,
+ Convert_v1_SELinuxOptions_To_api_SELinuxOptions,
+ Convert_api_SELinuxOptions_To_v1_SELinuxOptions,
+ Convert_v1_Secret_To_api_Secret,
+ Convert_api_Secret_To_v1_Secret,
+ Convert_v1_SecretKeySelector_To_api_SecretKeySelector,
+ Convert_api_SecretKeySelector_To_v1_SecretKeySelector,
+ Convert_v1_SecretList_To_api_SecretList,
+ Convert_api_SecretList_To_v1_SecretList,
+ Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource,
+ Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource,
+ Convert_v1_SecurityContext_To_api_SecurityContext,
+ Convert_api_SecurityContext_To_v1_SecurityContext,
+ Convert_v1_SerializedReference_To_api_SerializedReference,
+ Convert_api_SerializedReference_To_v1_SerializedReference,
+ Convert_v1_Service_To_api_Service,
+ Convert_api_Service_To_v1_Service,
+ Convert_v1_ServiceAccount_To_api_ServiceAccount,
+ Convert_api_ServiceAccount_To_v1_ServiceAccount,
+ Convert_v1_ServiceAccountList_To_api_ServiceAccountList,
+ Convert_api_ServiceAccountList_To_v1_ServiceAccountList,
+ Convert_v1_ServiceList_To_api_ServiceList,
+ Convert_api_ServiceList_To_v1_ServiceList,
+ Convert_v1_ServicePort_To_api_ServicePort,
+ Convert_api_ServicePort_To_v1_ServicePort,
+ Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions,
+ Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions,
+ Convert_v1_ServiceSpec_To_api_ServiceSpec,
+ Convert_api_ServiceSpec_To_v1_ServiceSpec,
+ Convert_v1_ServiceStatus_To_api_ServiceStatus,
+ Convert_api_ServiceStatus_To_v1_ServiceStatus,
+ Convert_v1_TCPSocketAction_To_api_TCPSocketAction,
+ Convert_api_TCPSocketAction_To_v1_TCPSocketAction,
+ Convert_v1_Taint_To_api_Taint,
+ Convert_api_Taint_To_v1_Taint,
+ Convert_v1_Toleration_To_api_Toleration,
+ Convert_api_Toleration_To_v1_Toleration,
+ Convert_v1_Volume_To_api_Volume,
+ Convert_api_Volume_To_v1_Volume,
+ Convert_v1_VolumeMount_To_api_VolumeMount,
+ Convert_api_VolumeMount_To_v1_VolumeMount,
+ Convert_v1_VolumeSource_To_api_VolumeSource,
+ Convert_api_VolumeSource_To_v1_VolumeSource,
+ Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource,
+ Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource,
+ Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm,
+ Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s)
+}
+
+func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error {
+ if in.NodeAffinity != nil {
+ in, out := &in.NodeAffinity, &out.NodeAffinity
+ *out = new(api.NodeAffinity)
+ if err := Convert_v1_NodeAffinity_To_api_NodeAffinity(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NodeAffinity = nil
+ }
+ if in.PodAffinity != nil {
+ in, out := &in.PodAffinity, &out.PodAffinity
+ *out = new(api.PodAffinity)
+ if err := Convert_v1_PodAffinity_To_api_PodAffinity(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PodAffinity = nil
+ }
+ if in.PodAntiAffinity != nil {
+ in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
+ *out = new(api.PodAntiAffinity)
+ if err := Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PodAntiAffinity = nil
+ }
+ return nil
+}
+
+func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error {
+ return autoConvert_v1_Affinity_To_api_Affinity(in, out, s)
+}
+
+func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error {
+ if in.NodeAffinity != nil {
+ in, out := &in.NodeAffinity, &out.NodeAffinity
+ *out = new(NodeAffinity)
+ if err := Convert_api_NodeAffinity_To_v1_NodeAffinity(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NodeAffinity = nil
+ }
+ if in.PodAffinity != nil {
+ in, out := &in.PodAffinity, &out.PodAffinity
+ *out = new(PodAffinity)
+ if err := Convert_api_PodAffinity_To_v1_PodAffinity(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PodAffinity = nil
+ }
+ if in.PodAntiAffinity != nil {
+ in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
+ *out = new(PodAntiAffinity)
+ if err := Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PodAntiAffinity = nil
+ }
+ return nil
+}
+
+func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error {
+ return autoConvert_api_Affinity_To_v1_Affinity(in, out, s)
+}
+
+func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error {
+ out.Name = api.UniqueVolumeName(in.Name)
+ out.DevicePath = in.DevicePath
+ return nil
+}
+
+func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error {
+ return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s)
+}
+
+func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error {
+ out.Name = UniqueVolumeName(in.Name)
+ out.DevicePath = in.DevicePath
+ return nil
+}
+
+func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error {
+ return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s)
+}
+
+func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
+ out.SecretName = in.SecretName
+ out.ShareName = in.ShareName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s)
+}
+
+func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error {
+ out.SecretName = in.SecretName
+ out.ShareName = in.ShareName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error {
+ return autoConvert_v1_Binding_To_api_Binding(in, out, s)
+}
+
+func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error {
+ return autoConvert_api_Binding_To_v1_Binding(in, out, s)
+}
+
+func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error {
+ if in.Add != nil {
+ in, out := &in.Add, &out.Add
+ *out = make([]api.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = api.Capability((*in)[i])
+ }
+ } else {
+ out.Add = nil
+ }
+ if in.Drop != nil {
+ in, out := &in.Drop, &out.Drop
+ *out = make([]api.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = api.Capability((*in)[i])
+ }
+ } else {
+ out.Drop = nil
+ }
+ return nil
+}
+
+func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error {
+ return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s)
+}
+
+func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error {
+ if in.Add != nil {
+ in, out := &in.Add, &out.Add
+ *out = make([]Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = Capability((*in)[i])
+ }
+ } else {
+ out.Add = nil
+ }
+ if in.Drop != nil {
+ in, out := &in.Drop, &out.Drop
+ *out = make([]Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = Capability((*in)[i])
+ }
+ } else {
+ out.Drop = nil
+ }
+ return nil
+}
+
+func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error {
+ return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s)
+}
+
+func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
+ out.Monitors = in.Monitors
+ out.Path = in.Path
+ out.User = in.User
+ out.SecretFile = in.SecretFile
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(api.LocalObjectReference)
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s)
+}
+
+func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error {
+ out.Monitors = in.Monitors
+ out.Path = in.Path
+ out.User = in.User
+ out.SecretFile = in.SecretFile
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s)
+}
+
+func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error {
+ out.Type = api.ComponentConditionType(in.Type)
+ out.Status = api.ConditionStatus(in.Status)
+ out.Message = in.Message
+ out.Error = in.Error
+ return nil
+}
+
+func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error {
+ return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s)
+}
+
+func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error {
+ out.Type = ComponentConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ out.Message = in.Message
+ out.Error = in.Error
+ return nil
+}
+
+func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error {
+ return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s)
+}
+
+func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]api.ComponentCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ComponentCondition_To_api_ComponentCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ return nil
+}
+
+func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error {
+ return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s)
+}
+
+func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ComponentCondition, len(*in))
+ for i := range *in {
+ if err := Convert_api_ComponentCondition_To_v1_ComponentCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ return nil
+}
+
+func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error {
+ return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s)
+}
+
+func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.ComponentStatus, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ComponentStatus_To_api_ComponentStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error {
+ return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s)
+}
+
+func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ComponentStatus, len(*in))
+ for i := range *in {
+ if err := Convert_api_ComponentStatus_To_v1_ComponentStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error {
+ return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s)
+}
+
+func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error {
+ SetDefaults_ConfigMap(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ out.Data = in.Data
+ return nil
+}
+
+func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error {
+ return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s)
+}
+
+func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ out.Data = in.Data
+ return nil
+}
+
+func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error {
+ return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s)
+}
+
+func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+ return err
+ }
+ out.Key = in.Key
+ return nil
+}
+
+func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
+ return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s)
+}
+
+func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error {
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+ return err
+ }
+ out.Key = in.Key
+ return nil
+}
+
+func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error {
+ return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s)
+}
+
+func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.ConfigMap, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ConfigMap_To_api_ConfigMap(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error {
+ return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s)
+}
+
+func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ConfigMap, len(*in))
+ for i := range *in {
+ if err := Convert_api_ConfigMap_To_v1_ConfigMap(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error {
+ return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s)
+}
+
+func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.KeyToPath, len(*in))
+ for i := range *in {
+ if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s)
+}
+
+func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error {
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]KeyToPath, len(*in))
+ for i := range *in {
+ if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
+ SetDefaults_Container(in)
+ out.Name = in.Name
+ out.Image = in.Image
+ out.Command = in.Command
+ out.Args = in.Args
+ out.WorkingDir = in.WorkingDir
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]api.ContainerPort, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ContainerPort_To_api_ContainerPort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]api.EnvVar, len(*in))
+ for i := range *in {
+ if err := Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Env = nil
+ }
+ if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
+ return err
+ }
+ if in.VolumeMounts != nil {
+ in, out := &in.VolumeMounts, &out.VolumeMounts
+ *out = make([]api.VolumeMount, len(*in))
+ for i := range *in {
+ if err := Convert_v1_VolumeMount_To_api_VolumeMount(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumeMounts = nil
+ }
+ if in.LivenessProbe != nil {
+ in, out := &in.LivenessProbe, &out.LivenessProbe
+ *out = new(api.Probe)
+ if err := Convert_v1_Probe_To_api_Probe(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.LivenessProbe = nil
+ }
+ if in.ReadinessProbe != nil {
+ in, out := &in.ReadinessProbe, &out.ReadinessProbe
+ *out = new(api.Probe)
+ if err := Convert_v1_Probe_To_api_Probe(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ReadinessProbe = nil
+ }
+ if in.Lifecycle != nil {
+ in, out := &in.Lifecycle, &out.Lifecycle
+ *out = new(api.Lifecycle)
+ if err := Convert_v1_Lifecycle_To_api_Lifecycle(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Lifecycle = nil
+ }
+ out.TerminationMessagePath = in.TerminationMessagePath
+ out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
+ if in.SecurityContext != nil {
+ in, out := &in.SecurityContext, &out.SecurityContext
+ *out = new(api.SecurityContext)
+ if err := Convert_v1_SecurityContext_To_api_SecurityContext(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ out.Stdin = in.Stdin
+ out.StdinOnce = in.StdinOnce
+ out.TTY = in.TTY
+ return nil
+}
+
+func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
+ return autoConvert_v1_Container_To_api_Container(in, out, s)
+}
+
+func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Image = in.Image
+ out.Command = in.Command
+ out.Args = in.Args
+ out.WorkingDir = in.WorkingDir
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]ContainerPort, len(*in))
+ for i := range *in {
+ if err := Convert_api_ContainerPort_To_v1_ContainerPort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]EnvVar, len(*in))
+ for i := range *in {
+ if err := Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Env = nil
+ }
+ if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
+ return err
+ }
+ if in.VolumeMounts != nil {
+ in, out := &in.VolumeMounts, &out.VolumeMounts
+ *out = make([]VolumeMount, len(*in))
+ for i := range *in {
+ if err := Convert_api_VolumeMount_To_v1_VolumeMount(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumeMounts = nil
+ }
+ if in.LivenessProbe != nil {
+ in, out := &in.LivenessProbe, &out.LivenessProbe
+ *out = new(Probe)
+ if err := Convert_api_Probe_To_v1_Probe(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.LivenessProbe = nil
+ }
+ if in.ReadinessProbe != nil {
+ in, out := &in.ReadinessProbe, &out.ReadinessProbe
+ *out = new(Probe)
+ if err := Convert_api_Probe_To_v1_Probe(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ReadinessProbe = nil
+ }
+ if in.Lifecycle != nil {
+ in, out := &in.Lifecycle, &out.Lifecycle
+ *out = new(Lifecycle)
+ if err := Convert_api_Lifecycle_To_v1_Lifecycle(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Lifecycle = nil
+ }
+ out.TerminationMessagePath = in.TerminationMessagePath
+ out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy)
+ if in.SecurityContext != nil {
+ in, out := &in.SecurityContext, &out.SecurityContext
+ *out = new(SecurityContext)
+ if err := Convert_api_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ out.Stdin = in.Stdin
+ out.StdinOnce = in.StdinOnce
+ out.TTY = in.TTY
+ return nil
+}
+
+func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error {
+ return autoConvert_api_Container_To_v1_Container(in, out, s)
+}
+
+func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error {
+ out.Names = in.Names
+ out.SizeBytes = in.SizeBytes
+ return nil
+}
+
+func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error {
+ return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s)
+}
+
+func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error {
+ out.Names = in.Names
+ out.SizeBytes = in.SizeBytes
+ return nil
+}
+
+func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error {
+ return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s)
+}
+
+func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
+ SetDefaults_ContainerPort(in)
+ out.Name = in.Name
+ out.HostPort = in.HostPort
+ out.ContainerPort = in.ContainerPort
+ out.Protocol = api.Protocol(in.Protocol)
+ out.HostIP = in.HostIP
+ return nil
+}
+
+func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
+ return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s)
+}
+
+func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error {
+ out.Name = in.Name
+ out.HostPort = in.HostPort
+ out.ContainerPort = in.ContainerPort
+ out.Protocol = Protocol(in.Protocol)
+ out.HostIP = in.HostIP
+ return nil
+}
+
+func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error {
+ return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s)
+}
+
+func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error {
+ if in.Waiting != nil {
+ in, out := &in.Waiting, &out.Waiting
+ *out = new(api.ContainerStateWaiting)
+ if err := Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Waiting = nil
+ }
+ if in.Running != nil {
+ in, out := &in.Running, &out.Running
+ *out = new(api.ContainerStateRunning)
+ if err := Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Running = nil
+ }
+ if in.Terminated != nil {
+ in, out := &in.Terminated, &out.Terminated
+ *out = new(api.ContainerStateTerminated)
+ if err := Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Terminated = nil
+ }
+ return nil
+}
+
+func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error {
+ return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s)
+}
+
+func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error {
+ if in.Waiting != nil {
+ in, out := &in.Waiting, &out.Waiting
+ *out = new(ContainerStateWaiting)
+ if err := Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Waiting = nil
+ }
+ if in.Running != nil {
+ in, out := &in.Running, &out.Running
+ *out = new(ContainerStateRunning)
+ if err := Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Running = nil
+ }
+ if in.Terminated != nil {
+ in, out := &in.Terminated, &out.Terminated
+ *out = new(ContainerStateTerminated)
+ if err := Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Terminated = nil
+ }
+ return nil
+}
+
+func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error {
+ return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s)
+}
+
+func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error {
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error {
+ return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s)
+}
+
+func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error {
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error {
+ return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s)
+}
+
+func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error {
+ out.ExitCode = in.ExitCode
+ out.Signal = in.Signal
+ out.Reason = in.Reason
+ out.Message = in.Message
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FinishedAt, &out.FinishedAt, s); err != nil {
+ return err
+ }
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error {
+ return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s)
+}
+
+func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error {
+ out.ExitCode = in.ExitCode
+ out.Signal = in.Signal
+ out.Reason = in.Reason
+ out.Message = in.Message
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FinishedAt, &out.FinishedAt, s); err != nil {
+ return err
+ }
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error {
+ return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s)
+}
+
+func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error {
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error {
+ return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s)
+}
+
+func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error {
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error {
+ return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s)
+}
+
+func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil {
+ return err
+ }
+ out.Ready = in.Ready
+ out.RestartCount = in.RestartCount
+ out.Image = in.Image
+ out.ImageID = in.ImageID
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error {
+ return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s)
+}
+
+func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil {
+ return err
+ }
+ out.Ready = in.Ready
+ out.RestartCount = in.RestartCount
+ out.Image = in.Image
+ out.ImageID = in.ImageID
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error {
+ return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s)
+}
+
+func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error {
+ out.Port = in.Port
+ return nil
+}
+
+func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error {
+ return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s)
+}
+
+func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error {
+ out.Port = in.Port
+ return nil
+}
+
+func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error {
+ return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s)
+}
+
+func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.GracePeriodSeconds = in.GracePeriodSeconds
+ if in.Preconditions != nil {
+ in, out := &in.Preconditions, &out.Preconditions
+ *out = new(api.Preconditions)
+ if err := Convert_v1_Preconditions_To_api_Preconditions(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Preconditions = nil
+ }
+ out.OrphanDependents = in.OrphanDependents
+ return nil
+}
+
+func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error {
+ return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s)
+}
+
+func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.GracePeriodSeconds = in.GracePeriodSeconds
+ if in.Preconditions != nil {
+ in, out := &in.Preconditions, &out.Preconditions
+ *out = new(Preconditions)
+ if err := Convert_api_Preconditions_To_v1_Preconditions(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Preconditions = nil
+ }
+ out.OrphanDependents = in.OrphanDependents
+ return nil
+}
+
+func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error {
+ return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s)
+}
+
+func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
+ out.Path = in.Path
+ if in.FieldRef != nil {
+ in, out := &in.FieldRef, &out.FieldRef
+ *out = new(api.ObjectFieldSelector)
+ if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(api.ResourceFieldSelector)
+ if err := Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ return nil
+}
+
+func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
+ return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s)
+}
+
+func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error {
+ out.Path = in.Path
+ if in.FieldRef != nil {
+ in, out := &in.FieldRef, &out.FieldRef
+ *out = new(ObjectFieldSelector)
+ if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(ResourceFieldSelector)
+ if err := Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ return nil
+}
+
+func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error {
+ return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s)
+}
+
+func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.DownwardAPIVolumeFile, len(*in))
+ for i := range *in {
+ if err := Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s)
+}
+
+func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error {
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DownwardAPIVolumeFile, len(*in))
+ for i := range *in {
+ if err := Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
+ out.Medium = api.StorageMedium(in.Medium)
+ return nil
+}
+
+func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s)
+}
+
+func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error {
+ out.Medium = StorageMedium(in.Medium)
+ return nil
+}
+
+func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ if in.TargetRef != nil {
+ in, out := &in.TargetRef, &out.TargetRef
+ *out = new(api.ObjectReference)
+ if err := Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.TargetRef = nil
+ }
+ return nil
+}
+
+func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error {
+ return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s)
+}
+
+func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ if in.TargetRef != nil {
+ in, out := &in.TargetRef, &out.TargetRef
+ *out = new(ObjectReference)
+ if err := Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.TargetRef = nil
+ }
+ return nil
+}
+
+func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error {
+ return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s)
+}
+
+func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Port = in.Port
+ out.Protocol = api.Protocol(in.Protocol)
+ return nil
+}
+
+func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error {
+ return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s)
+}
+
+func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Port = in.Port
+ out.Protocol = Protocol(in.Protocol)
+ return nil
+}
+
+func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error {
+ return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s)
+}
+
+func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error {
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]api.EndpointAddress, len(*in))
+ for i := range *in {
+ if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Addresses = nil
+ }
+ if in.NotReadyAddresses != nil {
+ in, out := &in.NotReadyAddresses, &out.NotReadyAddresses
+ *out = make([]api.EndpointAddress, len(*in))
+ for i := range *in {
+ if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NotReadyAddresses = nil
+ }
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]api.EndpointPort, len(*in))
+ for i := range *in {
+ if err := Convert_v1_EndpointPort_To_api_EndpointPort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ return nil
+}
+
+func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error {
+ return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s)
+}
+
+func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error {
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]EndpointAddress, len(*in))
+ for i := range *in {
+ if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Addresses = nil
+ }
+ if in.NotReadyAddresses != nil {
+ in, out := &in.NotReadyAddresses, &out.NotReadyAddresses
+ *out = make([]EndpointAddress, len(*in))
+ for i := range *in {
+ if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NotReadyAddresses = nil
+ }
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]EndpointPort, len(*in))
+ for i := range *in {
+ if err := Convert_api_EndpointPort_To_v1_EndpointPort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ return nil
+}
+
+func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error {
+ return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s)
+}
+
+func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error {
+ SetDefaults_Endpoints(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if in.Subsets != nil {
+ in, out := &in.Subsets, &out.Subsets
+ *out = make([]api.EndpointSubset, len(*in))
+ for i := range *in {
+ if err := Convert_v1_EndpointSubset_To_api_EndpointSubset(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subsets = nil
+ }
+ return nil
+}
+
+func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error {
+ return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s)
+}
+
+func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if in.Subsets != nil {
+ in, out := &in.Subsets, &out.Subsets
+ *out = make([]EndpointSubset, len(*in))
+ for i := range *in {
+ if err := Convert_api_EndpointSubset_To_v1_EndpointSubset(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subsets = nil
+ }
+ return nil
+}
+
+func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error {
+ return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s)
+}
+
+func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Endpoints, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Endpoints_To_api_Endpoints(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error {
+ return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s)
+}
+
+func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Endpoints, len(*in))
+ for i := range *in {
+ if err := Convert_api_Endpoints_To_v1_Endpoints(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error {
+ return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s)
+}
+
+func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ if in.ValueFrom != nil {
+ in, out := &in.ValueFrom, &out.ValueFrom
+ *out = new(api.EnvVarSource)
+ if err := Convert_v1_EnvVarSource_To_api_EnvVarSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ValueFrom = nil
+ }
+ return nil
+}
+
+func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error {
+ return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s)
+}
+
+func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ if in.ValueFrom != nil {
+ in, out := &in.ValueFrom, &out.ValueFrom
+ *out = new(EnvVarSource)
+ if err := Convert_api_EnvVarSource_To_v1_EnvVarSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ValueFrom = nil
+ }
+ return nil
+}
+
+func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error {
+ return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s)
+}
+
+func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
+ if in.FieldRef != nil {
+ in, out := &in.FieldRef, &out.FieldRef
+ *out = new(api.ObjectFieldSelector)
+ if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(api.ResourceFieldSelector)
+ if err := Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ if in.ConfigMapKeyRef != nil {
+ in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+ *out = new(api.ConfigMapKeySelector)
+ if err := Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ConfigMapKeyRef = nil
+ }
+ if in.SecretKeyRef != nil {
+ in, out := &in.SecretKeyRef, &out.SecretKeyRef
+ *out = new(api.SecretKeySelector)
+ if err := Convert_v1_SecretKeySelector_To_api_SecretKeySelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretKeyRef = nil
+ }
+ return nil
+}
+
+func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
+ return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s)
+}
+
+func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error {
+ if in.FieldRef != nil {
+ in, out := &in.FieldRef, &out.FieldRef
+ *out = new(ObjectFieldSelector)
+ if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(ResourceFieldSelector)
+ if err := Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ if in.ConfigMapKeyRef != nil {
+ in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+ *out = new(ConfigMapKeySelector)
+ if err := Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ConfigMapKeyRef = nil
+ }
+ if in.SecretKeyRef != nil {
+ in, out := &in.SecretKeyRef, &out.SecretKeyRef
+ *out = new(SecretKeySelector)
+ if err := Convert_api_SecretKeySelector_To_v1_SecretKeySelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretKeyRef = nil
+ }
+ return nil
+}
+
+func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error {
+ return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s)
+}
+
+func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil {
+ return err
+ }
+ out.Count = in.Count
+ out.Type = in.Type
+ return nil
+}
+
+func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error {
+ return autoConvert_v1_Event_To_api_Event(in, out, s)
+}
+
+func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil {
+ return err
+ }
+ out.Count = in.Count
+ out.Type = in.Type
+ return nil
+}
+
+func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error {
+ return autoConvert_api_Event_To_v1_Event(in, out, s)
+}
+
+func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Event, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Event_To_api_Event(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error {
+ return autoConvert_v1_EventList_To_api_EventList(in, out, s)
+}
+
+func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Event, len(*in))
+ for i := range *in {
+ if err := Convert_api_Event_To_v1_Event(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error {
+ return autoConvert_api_EventList_To_v1_EventList(in, out, s)
+}
+
+func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error {
+ out.Component = in.Component
+ out.Host = in.Host
+ return nil
+}
+
+func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error {
+ return autoConvert_v1_EventSource_To_api_EventSource(in, out, s)
+}
+
+func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error {
+ out.Component = in.Component
+ out.Host = in.Host
+ return nil
+}
+
+func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error {
+ return autoConvert_api_EventSource_To_v1_EventSource(in, out, s)
+}
+
+func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error {
+ out.Command = in.Command
+ return nil
+}
+
+func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error {
+ return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s)
+}
+
+func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error {
+ out.Command = in.Command
+ return nil
+}
+
+func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error {
+ return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s)
+}
+
+func autoConvert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func Convert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error {
+ return autoConvert_v1_ExportOptions_To_api_ExportOptions(in, out, s)
+}
+
+func autoConvert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func Convert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error {
+ return autoConvert_api_ExportOptions_To_v1_ExportOptions(in, out, s)
+}
+
+func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
+ out.TargetWWNs = in.TargetWWNs
+ out.Lun = in.Lun
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s)
+}
+
+func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error {
+ out.TargetWWNs = in.TargetWWNs
+ out.Lun = in.Lun
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error {
+ out.Driver = in.Driver
+ out.FSType = in.FSType
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(api.LocalObjectReference)
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ out.Options = in.Options
+ return nil
+}
+
+func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s)
+}
+
+func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error {
+ out.Driver = in.Driver
+ out.FSType = in.FSType
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ out.Options = in.Options
+ return nil
+}
+
+func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error {
+ out.DatasetName = in.DatasetName
+ return nil
+}
+
+func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s)
+}
+
+func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error {
+ out.DatasetName = in.DatasetName
+ return nil
+}
+
+func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
+ out.PDName = in.PDName
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error {
+ out.PDName = in.PDName
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error {
+ out.Repository = in.Repository
+ out.Revision = in.Revision
+ out.Directory = in.Directory
+ return nil
+}
+
+func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s)
+}
+
+func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error {
+ out.Repository = in.Repository
+ out.Revision = in.Revision
+ out.Directory = in.Directory
+ return nil
+}
+
+func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error {
+ out.EndpointsName = in.EndpointsName
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s)
+}
+
+func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error {
+ out.EndpointsName = in.EndpointsName
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error {
+ SetDefaults_HTTPGetAction(in)
+ out.Path = in.Path
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ out.Host = in.Host
+ out.Scheme = api.URIScheme(in.Scheme)
+ if in.HTTPHeaders != nil {
+ in, out := &in.HTTPHeaders, &out.HTTPHeaders
+ *out = make([]api.HTTPHeader, len(*in))
+ for i := range *in {
+ if err := Convert_v1_HTTPHeader_To_api_HTTPHeader(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.HTTPHeaders = nil
+ }
+ return nil
+}
+
+func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error {
+ return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s)
+}
+
+func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error {
+ out.Path = in.Path
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ out.Host = in.Host
+ out.Scheme = URIScheme(in.Scheme)
+ if in.HTTPHeaders != nil {
+ in, out := &in.HTTPHeaders, &out.HTTPHeaders
+ *out = make([]HTTPHeader, len(*in))
+ for i := range *in {
+ if err := Convert_api_HTTPHeader_To_v1_HTTPHeader(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.HTTPHeaders = nil
+ }
+ return nil
+}
+
+func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error {
+ return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s)
+}
+
+func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ return nil
+}
+
+func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error {
+ return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s)
+}
+
+func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ return nil
+}
+
+func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error {
+ return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s)
+}
+
+func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error {
+ if in.Exec != nil {
+ in, out := &in.Exec, &out.Exec
+ *out = new(api.ExecAction)
+ if err := Convert_v1_ExecAction_To_api_ExecAction(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Exec = nil
+ }
+ if in.HTTPGet != nil {
+ in, out := &in.HTTPGet, &out.HTTPGet
+ *out = new(api.HTTPGetAction)
+ if err := Convert_v1_HTTPGetAction_To_api_HTTPGetAction(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HTTPGet = nil
+ }
+ if in.TCPSocket != nil {
+ in, out := &in.TCPSocket, &out.TCPSocket
+ *out = new(api.TCPSocketAction)
+ if err := Convert_v1_TCPSocketAction_To_api_TCPSocketAction(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.TCPSocket = nil
+ }
+ return nil
+}
+
+func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error {
+ return autoConvert_v1_Handler_To_api_Handler(in, out, s)
+}
+
+func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error {
+ if in.Exec != nil {
+ in, out := &in.Exec, &out.Exec
+ *out = new(ExecAction)
+ if err := Convert_api_ExecAction_To_v1_ExecAction(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Exec = nil
+ }
+ if in.HTTPGet != nil {
+ in, out := &in.HTTPGet, &out.HTTPGet
+ *out = new(HTTPGetAction)
+ if err := Convert_api_HTTPGetAction_To_v1_HTTPGetAction(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HTTPGet = nil
+ }
+ if in.TCPSocket != nil {
+ in, out := &in.TCPSocket, &out.TCPSocket
+ *out = new(TCPSocketAction)
+ if err := Convert_api_TCPSocketAction_To_v1_TCPSocketAction(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.TCPSocket = nil
+ }
+ return nil
+}
+
+func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error {
+ return autoConvert_api_Handler_To_v1_Handler(in, out, s)
+}
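+
+// Illustrative usage sketch (hedged, not part of the generated converters): the exported
+// wrappers above can be called directly. None of the leaf conversions reached from
+// Convert_v1_Handler_To_api_Handler appear to dereference the conversion.Scope, so passing
+// nil is assumed to be safe for this example; the import paths follow the vendored
+// k8s.io/kubernetes layout and the example is assumed to live in this package.
+//
+//	import (
+//		"k8s.io/kubernetes/pkg/api"
+//		"k8s.io/kubernetes/pkg/util/intstr"
+//	)
+//
+//	func exampleHandlerConversion() (*api.Handler, error) {
+//		in := &Handler{
+//			HTTPGet: &HTTPGetAction{
+//				Path: "/healthz",
+//				Port: intstr.FromInt(8080),
+//				// Scheme left empty: SetDefaults_HTTPGetAction is expected to fill it
+//				// in on the v1 -> api path.
+//			},
+//		}
+//		out := &api.Handler{}
+//		if err := Convert_v1_Handler_To_api_Handler(in, out, nil); err != nil {
+//			return nil, err
+//		}
+//		return out, nil
+//	}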
+
+func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error {
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s)
+}
+
+func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error {
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error {
+ SetDefaults_ISCSIVolumeSource(in)
+ out.TargetPortal = in.TargetPortal
+ out.IQN = in.IQN
+ out.Lun = in.Lun
+ out.ISCSIInterface = in.ISCSIInterface
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s)
+}
+
+func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error {
+ out.TargetPortal = in.TargetPortal
+ out.IQN = in.IQN
+ out.Lun = in.Lun
+ out.ISCSIInterface = in.ISCSIInterface
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error {
+ return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s)
+}
+
+func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error {
+ return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s)
+}
+
+func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error {
+ if in.PostStart != nil {
+ in, out := &in.PostStart, &out.PostStart
+ *out = new(api.Handler)
+ if err := Convert_v1_Handler_To_api_Handler(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PostStart = nil
+ }
+ if in.PreStop != nil {
+ in, out := &in.PreStop, &out.PreStop
+ *out = new(api.Handler)
+ if err := Convert_v1_Handler_To_api_Handler(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PreStop = nil
+ }
+ return nil
+}
+
+func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error {
+ return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s)
+}
+
+func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error {
+ if in.PostStart != nil {
+ in, out := &in.PostStart, &out.PostStart
+ *out = new(Handler)
+ if err := Convert_api_Handler_To_v1_Handler(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PostStart = nil
+ }
+ if in.PreStop != nil {
+ in, out := &in.PreStop, &out.PreStop
+ *out = new(Handler)
+ if err := Convert_api_Handler_To_v1_Handler(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PreStop = nil
+ }
+ return nil
+}
+
+func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error {
+ return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s)
+}
+
+func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error {
+ return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s)
+}
+
+func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error {
+ return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s)
+}
+
+func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error {
+ SetDefaults_LimitRangeItem(in)
+ out.Type = api.LimitType(in.Type)
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Max, &out.Max, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Min, &out.Min, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Default, &out.Default, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.DefaultRequest, &out.DefaultRequest, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error {
+ return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s)
+}
+
+func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error {
+ out.Type = LimitType(in.Type)
+ if in.Max != nil {
+ in, out := &in.Max, &out.Max
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Max = nil
+ }
+ if in.Min != nil {
+ in, out := &in.Min, &out.Min
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Min = nil
+ }
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Default = nil
+ }
+ if in.DefaultRequest != nil {
+ in, out := &in.DefaultRequest, &out.DefaultRequest
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.DefaultRequest = nil
+ }
+ if in.MaxLimitRequestRatio != nil {
+ in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.MaxLimitRequestRatio = nil
+ }
+ return nil
+}
+
+func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error {
+ return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s)
+}
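+
+// Illustrative sketch (hedged, not part of the generated converters): note the asymmetry
+// above — the v1 -> api direction delegates ResourceList fields to
+// Convert_v1_ResourceList_To_api_ResourceList, while the api -> v1 direction inlines the
+// map copy and pushes every value through Convert_resource_Quantity_To_resource_Quantity.
+// resource.MustParse and the vendored import paths are assumptions based on the standard
+// k8s.io/kubernetes layout; a nil scope is assumed safe, as in the sketch above.
+//
+//	import (
+//		"k8s.io/kubernetes/pkg/api"
+//		"k8s.io/kubernetes/pkg/api/resource"
+//	)
+//
+//	func exampleLimitRangeItemConversion() (*LimitRangeItem, error) {
+//		in := &api.LimitRangeItem{
+//			Type: api.LimitTypeContainer,
+//			Max:  api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
+//			Min:  api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+//		}
+//		out := &LimitRangeItem{}
+//		if err := Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, nil); err != nil {
+//			return nil, err
+//		}
+//		return out, nil
+//	}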
+
+func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.LimitRange, len(*in))
+ for i := range *in {
+ if err := Convert_v1_LimitRange_To_api_LimitRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error {
+ return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s)
+}
+
+func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]LimitRange, len(*in))
+ for i := range *in {
+ if err := Convert_api_LimitRange_To_v1_LimitRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error {
+ return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s)
+}
+
+func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error {
+ if in.Limits != nil {
+ in, out := &in.Limits, &out.Limits
+ *out = make([]api.LimitRangeItem, len(*in))
+ for i := range *in {
+ if err := Convert_v1_LimitRangeItem_To_api_LimitRangeItem(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Limits = nil
+ }
+ return nil
+}
+
+func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error {
+ return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s)
+}
+
+func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error {
+ if in.Limits != nil {
+ in, out := &in.Limits, &out.Limits
+ *out = make([]LimitRangeItem, len(*in))
+ for i := range *in {
+ if err := Convert_api_LimitRangeItem_To_v1_LimitRangeItem(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Limits = nil
+ }
+ return nil
+}
+
+func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error {
+ return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s)
+}
+
+func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]runtime.Object, len(*in))
+ for i := range *in {
+ if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error {
+ return autoConvert_v1_List_To_api_List(in, out, s)
+}
+
+func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]runtime.RawExtension, len(*in))
+ for i := range *in {
+ if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error {
+ return autoConvert_api_List_To_v1_List(in, out, s)
+}
+
+func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+ return err
+ }
+ if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+ return err
+ }
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ out.TimeoutSeconds = in.TimeoutSeconds
+ return nil
+}
+
+func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error {
+ return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s)
+}
+
+func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+ return err
+ }
+ if err := api.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+ return err
+ }
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ out.TimeoutSeconds = in.TimeoutSeconds
+ return nil
+}
+
+func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error {
+ return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s)
+}
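+
+// Illustrative sketch (hedged): ListOptions carries selectors as plain strings in v1 and as
+// parsed selector values in the internal api type; the converters above route them through
+// api.Convert_string_To_labels_Selector / api.Convert_string_To_fields_Selector and their
+// inverses, so an unparsable selector string is expected to surface as a conversion error.
+// A nil scope is assumed safe, as in the sketches above.
+//
+//	func exampleListOptionsConversion() (*api.ListOptions, error) {
+//		in := &ListOptions{LabelSelector: "app=kube2msb", Watch: true}
+//		out := &api.ListOptions{}
+//		if err := Convert_v1_ListOptions_To_api_ListOptions(in, out, nil); err != nil {
+//			return nil, err // e.g. a selector string that fails to parse
+//		}
+//		return out, nil
+//	}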
+
+func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ return nil
+}
+
+func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error {
+ return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s)
+}
+
+func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ return nil
+}
+
+func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error {
+ return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s)
+}
+
+func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error {
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]api.LoadBalancerIngress, len(*in))
+ for i := range *in {
+ if err := Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error {
+ return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s)
+}
+
+func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error {
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]LoadBalancerIngress, len(*in))
+ for i := range *in {
+ if err := Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error {
+ return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s)
+}
+
+func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
+ out.Name = in.Name
+ return nil
+}
+
+func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
+ return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s)
+}
+
+func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error {
+ out.Name = in.Name
+ return nil
+}
+
+func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error {
+ return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s)
+}
+
+func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error {
+ out.Server = in.Server
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s)
+}
+
+func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error {
+ out.Server = in.Server
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error {
+ return autoConvert_v1_Namespace_To_api_Namespace(in, out, s)
+}
+
+func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error {
+ return autoConvert_api_Namespace_To_v1_Namespace(in, out, s)
+}
+
+func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Namespace, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Namespace_To_api_Namespace(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error {
+ return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s)
+}
+
+func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Namespace, len(*in))
+ for i := range *in {
+ if err := Convert_api_Namespace_To_v1_Namespace(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error {
+ return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s)
+}
+
+func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error {
+ if in.Finalizers != nil {
+ in, out := &in.Finalizers, &out.Finalizers
+ *out = make([]api.FinalizerName, len(*in))
+ for i := range *in {
+ (*out)[i] = api.FinalizerName((*in)[i])
+ }
+ } else {
+ out.Finalizers = nil
+ }
+ return nil
+}
+
+func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error {
+ return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s)
+}
+
+func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error {
+ if in.Finalizers != nil {
+ in, out := &in.Finalizers, &out.Finalizers
+ *out = make([]FinalizerName, len(*in))
+ for i := range *in {
+ (*out)[i] = FinalizerName((*in)[i])
+ }
+ } else {
+ out.Finalizers = nil
+ }
+ return nil
+}
+
+func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error {
+ return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s)
+}
+
+func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error {
+ SetDefaults_NamespaceStatus(in)
+ out.Phase = api.NamespacePhase(in.Phase)
+ return nil
+}
+
+func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error {
+ return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s)
+}
+
+func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error {
+ out.Phase = NamespacePhase(in.Phase)
+ return nil
+}
+
+func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error {
+ return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s)
+}
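+
+// Illustrative note (hedged): as in several converters in this file, defaulting runs only on
+// the v1 -> api path — SetDefaults_NamespaceStatus is applied before the phase is cast,
+// while the api -> v1 direction is a plain cast. A minimal sketch; exactly what the
+// defaulter fills in for an empty phase is an assumption about code not shown in this file.
+//
+//	func exampleNamespaceStatusConversion() (api.NamespacePhase, error) {
+//		in := &NamespaceStatus{} // Phase left empty on purpose.
+//		out := &api.NamespaceStatus{}
+//		if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, nil); err != nil {
+//			return "", err
+//		}
+//		return out.Phase, nil // defaulted by SetDefaults_NamespaceStatus, then cast.
+//	}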
+
+func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error {
+ SetDefaults_Node(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error {
+ return autoConvert_v1_Node_To_api_Node(in, out, s)
+}
+
+func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error {
+ return autoConvert_api_Node_To_v1_Node(in, out, s)
+}
+
+func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error {
+ out.Type = api.NodeAddressType(in.Type)
+ out.Address = in.Address
+ return nil
+}
+
+func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error {
+ return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s)
+}
+
+func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error {
+ out.Type = NodeAddressType(in.Type)
+ out.Address = in.Address
+ return nil
+}
+
+func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error {
+ return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s)
+}
+
+func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = new(api.NodeSelector)
+ if err := Convert_v1_NodeSelector_To_api_NodeSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]api.PreferredSchedulingTerm, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error {
+ return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s)
+}
+
+func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = new(NodeSelector)
+ if err := Convert_api_NodeSelector_To_v1_NodeSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PreferredSchedulingTerm, len(*in))
+ for i := range *in {
+ if err := Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error {
+ return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s)
+}
+
+func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error {
+ out.Type = api.NodeConditionType(in.Type)
+ out.Status = api.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error {
+ return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s)
+}
+
+func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error {
+ out.Type = NodeConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error {
+ return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s)
+}
+
+func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error {
+ if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error {
+ return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s)
+}
+
+func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error {
+ if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error {
+ return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s)
+}
+
+func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Node, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Node_To_api_Node(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error {
+ return autoConvert_v1_NodeList_To_api_NodeList(in, out, s)
+}
+
+func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Node, len(*in))
+ for i := range *in {
+ if err := Convert_api_Node_To_v1_Node(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error {
+ return autoConvert_api_NodeList_To_v1_NodeList(in, out, s)
+}
+
+func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error {
+ return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s)
+}
+
+func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error {
+ return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s)
+}
+
+func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error {
+ if in.NodeSelectorTerms != nil {
+ in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
+ *out = make([]api.NodeSelectorTerm, len(*in))
+ for i := range *in {
+ if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NodeSelectorTerms = nil
+ }
+ return nil
+}
+
+func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error {
+ return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s)
+}
+
+func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error {
+ if in.NodeSelectorTerms != nil {
+ in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
+ *out = make([]NodeSelectorTerm, len(*in))
+ for i := range *in {
+ if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NodeSelectorTerms = nil
+ }
+ return nil
+}
+
+func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error {
+ return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s)
+}
+
+func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = api.NodeSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s)
+}
+
+func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = NodeSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s)
+}
+
+func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error {
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]api.NodeSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error {
+ return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s)
+}
+
+func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error {
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]NodeSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error {
+ return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s)
+}
+
+func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error {
+ out.PodCIDR = in.PodCIDR
+ out.ExternalID = in.ExternalID
+ out.ProviderID = in.ProviderID
+ out.Unschedulable = in.Unschedulable
+ return nil
+}
+
+func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error {
+ return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s)
+}
+
+func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error {
+ out.PodCIDR = in.PodCIDR
+ out.ExternalID = in.ExternalID
+ out.ProviderID = in.ProviderID
+ out.Unschedulable = in.Unschedulable
+ return nil
+}
+
+func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error {
+ return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s)
+}
+
+func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error {
+ SetDefaults_NodeStatus(in)
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Allocatable, &out.Allocatable, s); err != nil {
+ return err
+ }
+ out.Phase = api.NodePhase(in.Phase)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]api.NodeCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v1_NodeCondition_To_api_NodeCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]api.NodeAddress, len(*in))
+ for i := range *in {
+ if err := Convert_v1_NodeAddress_To_api_NodeAddress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Addresses = nil
+ }
+ if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil {
+ return err
+ }
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make([]api.ContainerImage, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ContainerImage_To_api_ContainerImage(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Images = nil
+ }
+ if in.VolumesInUse != nil {
+ in, out := &in.VolumesInUse, &out.VolumesInUse
+ *out = make([]api.UniqueVolumeName, len(*in))
+ for i := range *in {
+ (*out)[i] = api.UniqueVolumeName((*in)[i])
+ }
+ } else {
+ out.VolumesInUse = nil
+ }
+ if in.VolumesAttached != nil {
+ in, out := &in.VolumesAttached, &out.VolumesAttached
+ *out = make([]api.AttachedVolume, len(*in))
+ for i := range *in {
+ if err := Convert_v1_AttachedVolume_To_api_AttachedVolume(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumesAttached = nil
+ }
+ return nil
+}
+
+func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error {
+ return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s)
+}
+
+func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error {
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Capacity = nil
+ }
+ if in.Allocatable != nil {
+ in, out := &in.Allocatable, &out.Allocatable
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Allocatable = nil
+ }
+ out.Phase = NodePhase(in.Phase)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]NodeCondition, len(*in))
+ for i := range *in {
+ if err := Convert_api_NodeCondition_To_v1_NodeCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]NodeAddress, len(*in))
+ for i := range *in {
+ if err := Convert_api_NodeAddress_To_v1_NodeAddress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Addresses = nil
+ }
+ if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil {
+ return err
+ }
+ if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil {
+ return err
+ }
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make([]ContainerImage, len(*in))
+ for i := range *in {
+ if err := Convert_api_ContainerImage_To_v1_ContainerImage(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Images = nil
+ }
+ if in.VolumesInUse != nil {
+ in, out := &in.VolumesInUse, &out.VolumesInUse
+ *out = make([]UniqueVolumeName, len(*in))
+ for i := range *in {
+ (*out)[i] = UniqueVolumeName((*in)[i])
+ }
+ } else {
+ out.VolumesInUse = nil
+ }
+ if in.VolumesAttached != nil {
+ in, out := &in.VolumesAttached, &out.VolumesAttached
+ *out = make([]AttachedVolume, len(*in))
+ for i := range *in {
+ if err := Convert_api_AttachedVolume_To_v1_AttachedVolume(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumesAttached = nil
+ }
+ return nil
+}
+
+func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error {
+ return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s)
+}
+
+func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error {
+ out.MachineID = in.MachineID
+ out.SystemUUID = in.SystemUUID
+ out.BootID = in.BootID
+ out.KernelVersion = in.KernelVersion
+ out.OSImage = in.OSImage
+ out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
+ out.KubeletVersion = in.KubeletVersion
+ out.KubeProxyVersion = in.KubeProxyVersion
+ out.OperatingSystem = in.OperatingSystem
+ out.Architecture = in.Architecture
+ return nil
+}
+
+func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error {
+ return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s)
+}
+
+func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error {
+ out.MachineID = in.MachineID
+ out.SystemUUID = in.SystemUUID
+ out.BootID = in.BootID
+ out.KernelVersion = in.KernelVersion
+ out.OSImage = in.OSImage
+ out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
+ out.KubeletVersion = in.KubeletVersion
+ out.KubeProxyVersion = in.KubeProxyVersion
+ out.OperatingSystem = in.OperatingSystem
+ out.Architecture = in.Architecture
+ return nil
+}
+
+func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error {
+ return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s)
+}
+
+func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error {
+ SetDefaults_ObjectFieldSelector(in)
+ out.APIVersion = in.APIVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error {
+ return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s)
+}
+
+func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error {
+ out.APIVersion = in.APIVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error {
+ return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s)
+}
+
+func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error {
+ out.Name = in.Name
+ out.GenerateName = in.GenerateName
+ out.Namespace = in.Namespace
+ out.SelfLink = in.SelfLink
+ out.UID = types.UID(in.UID)
+ out.ResourceVersion = in.ResourceVersion
+ out.Generation = in.Generation
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil {
+ return err
+ }
+ out.DeletionTimestamp = in.DeletionTimestamp
+ out.DeletionGracePeriodSeconds = in.DeletionGracePeriodSeconds
+ out.Labels = in.Labels
+ out.Annotations = in.Annotations
+ if in.OwnerReferences != nil {
+ in, out := &in.OwnerReferences, &out.OwnerReferences
+ *out = make([]api.OwnerReference, len(*in))
+ for i := range *in {
+ if err := Convert_v1_OwnerReference_To_api_OwnerReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.OwnerReferences = nil
+ }
+ out.Finalizers = in.Finalizers
+ return nil
+}
+
+func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error {
+ return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s)
+}
+
+func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error {
+ out.Name = in.Name
+ out.GenerateName = in.GenerateName
+ out.Namespace = in.Namespace
+ out.SelfLink = in.SelfLink
+ out.UID = types.UID(in.UID)
+ out.ResourceVersion = in.ResourceVersion
+ out.Generation = in.Generation
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil {
+ return err
+ }
+ out.DeletionTimestamp = in.DeletionTimestamp
+ out.DeletionGracePeriodSeconds = in.DeletionGracePeriodSeconds
+ out.Labels = in.Labels
+ out.Annotations = in.Annotations
+ if in.OwnerReferences != nil {
+ in, out := &in.OwnerReferences, &out.OwnerReferences
+ *out = make([]OwnerReference, len(*in))
+ for i := range *in {
+ if err := Convert_api_OwnerReference_To_v1_OwnerReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.OwnerReferences = nil
+ }
+ out.Finalizers = in.Finalizers
+ return nil
+}
+
+func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error {
+ return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s)
+}
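+
+// Illustrative note (hedged): ObjectMeta converts field by field — UID is cast through
+// types.UID, CreationTimestamp goes through Convert_unversioned_Time_To_unversioned_Time,
+// and OwnerReferences are converted element-wise — but reference-typed fields such as
+// Labels and Annotations are assigned directly, so the converted object shares those maps
+// with its source. A minimal sketch of that sharing, with a nil scope assumed safe as above:
+//
+//	func exampleObjectMetaSharedLabels() bool {
+//		in := &ObjectMeta{Name: "kube2msb", Labels: map[string]string{"app": "kube2msb"}}
+//		out := &api.ObjectMeta{}
+//		if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(in, out, nil); err != nil {
+//			return false
+//		}
+//		out.Labels["added"] = "visible-on-both"
+//		return in.Labels["added"] == "visible-on-both" // true: the map header, not the contents, was copied
+//	}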
+
+func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ out.UID = types.UID(in.UID)
+ out.APIVersion = in.APIVersion
+ out.ResourceVersion = in.ResourceVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error {
+ return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s)
+}
+
+func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ out.UID = types.UID(in.UID)
+ out.APIVersion = in.APIVersion
+ out.ResourceVersion = in.ResourceVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error {
+ return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s)
+}
+
+func autoConvert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error {
+ out.APIVersion = in.APIVersion
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.UID = types.UID(in.UID)
+ out.Controller = in.Controller
+ return nil
+}
+
+func Convert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error {
+ return autoConvert_v1_OwnerReference_To_api_OwnerReference(in, out, s)
+}
+
+func autoConvert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error {
+ out.APIVersion = in.APIVersion
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.UID = types.UID(in.UID)
+ out.Controller = in.Controller
+ return nil
+}
+
+func Convert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error {
+ return autoConvert_api_OwnerReference_To_v1_OwnerReference(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error {
+ SetDefaults_PersistentVolume(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s)
+}
+
+func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error {
+ SetDefaults_PersistentVolumeClaim(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ if err := Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error {
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]api.PersistentVolumeAccessMode, len(*in))
+ for i := range *in {
+ (*out)[i] = api.PersistentVolumeAccessMode((*in)[i])
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ out.Selector = in.Selector
+ if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
+ return err
+ }
+ out.VolumeName = in.VolumeName
+ return nil
+}
+
+func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error {
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(*in))
+ for i := range *in {
+ (*out)[i] = PersistentVolumeAccessMode((*in)[i])
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ out.Selector = in.Selector
+ if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
+ return err
+ }
+ out.VolumeName = in.VolumeName
+ return nil
+}
+
+func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error {
+ out.Phase = api.PersistentVolumeClaimPhase(in.Phase)
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]api.PersistentVolumeAccessMode, len(*in))
+ for i := range *in {
+ (*out)[i] = api.PersistentVolumeAccessMode((*in)[i])
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s)
+}
+
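+// In this direction the Capacity map is expanded key by key so that each
+// resource.Quantity is copied via Convert_resource_Quantity_To_resource_Quantity,
+// rather than going through a ResourceList conversion helper.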
+func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error {
+ out.Phase = PersistentVolumeClaimPhase(in.Phase)
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(*in))
+ for i := range *in {
+ (*out)[i] = PersistentVolumeAccessMode((*in)[i])
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Capacity = nil
+ }
+ return nil
+}
+
+func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
+ out.ClaimName = in.ClaimName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
+ out.ClaimName = in.ClaimName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.PersistentVolume, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PersistentVolume, len(*in))
+ for i := range *in {
+ if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s)
+}
+
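+// PersistentVolumeSource is a union-style struct in which at most one volume-source
+// pointer is expected to be set; each field is therefore converted only when non-nil
+// and explicitly cleared otherwise.
+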
+func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error {
+ if in.GCEPersistentDisk != nil {
+ in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(api.GCEPersistentDiskVolumeSource)
+ if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(api.AWSElasticBlockStoreVolumeSource)
+ if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.HostPath != nil {
+ in, out := &in.HostPath, &out.HostPath
+ *out = new(api.HostPathVolumeSource)
+ if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HostPath = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := &in.Glusterfs, &out.Glusterfs
+ *out = new(api.GlusterfsVolumeSource)
+ if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.NFS != nil {
+ in, out := &in.NFS, &out.NFS
+ *out = new(api.NFSVolumeSource)
+ if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NFS = nil
+ }
+ if in.RBD != nil {
+ in, out := &in.RBD, &out.RBD
+ *out = new(api.RBDVolumeSource)
+ if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.ISCSI != nil {
+ in, out := &in.ISCSI, &out.ISCSI
+ *out = new(api.ISCSIVolumeSource)
+ if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ISCSI = nil
+ }
+ if in.Cinder != nil {
+ in, out := &in.Cinder, &out.Cinder
+ *out = new(api.CinderVolumeSource)
+ if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := &in.CephFS, &out.CephFS
+ *out = new(api.CephFSVolumeSource)
+ if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.FC != nil {
+ in, out := &in.FC, &out.FC
+ *out = new(api.FCVolumeSource)
+ if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.Flocker != nil {
+ in, out := &in.Flocker, &out.Flocker
+ *out = new(api.FlockerVolumeSource)
+ if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Flocker = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := &in.FlexVolume, &out.FlexVolume
+ *out = new(api.FlexVolumeSource)
+ if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.AzureFile != nil {
+ in, out := &in.AzureFile, &out.AzureFile
+ *out = new(api.AzureFileVolumeSource)
+ if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AzureFile = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := &in.VsphereVolume, &out.VsphereVolume
+ *out = new(api.VsphereVirtualDiskVolumeSource)
+ if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error {
+ if in.GCEPersistentDisk != nil {
+ in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(GCEPersistentDiskVolumeSource)
+ if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.HostPath != nil {
+ in, out := &in.HostPath, &out.HostPath
+ *out = new(HostPathVolumeSource)
+ if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HostPath = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := &in.Glusterfs, &out.Glusterfs
+ *out = new(GlusterfsVolumeSource)
+ if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.NFS != nil {
+ in, out := &in.NFS, &out.NFS
+ *out = new(NFSVolumeSource)
+ if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NFS = nil
+ }
+ if in.RBD != nil {
+ in, out := &in.RBD, &out.RBD
+ *out = new(RBDVolumeSource)
+ if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.ISCSI != nil {
+ in, out := &in.ISCSI, &out.ISCSI
+ *out = new(ISCSIVolumeSource)
+ if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ISCSI = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := &in.FlexVolume, &out.FlexVolume
+ *out = new(FlexVolumeSource)
+ if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.Cinder != nil {
+ in, out := &in.Cinder, &out.Cinder
+ *out = new(CinderVolumeSource)
+ if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := &in.CephFS, &out.CephFS
+ *out = new(CephFSVolumeSource)
+ if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.FC != nil {
+ in, out := &in.FC, &out.FC
+ *out = new(FCVolumeSource)
+ if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.Flocker != nil {
+ in, out := &in.Flocker, &out.Flocker
+ *out = new(FlockerVolumeSource)
+ if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Flocker = nil
+ }
+ if in.AzureFile != nil {
+ in, out := &in.AzureFile, &out.AzureFile
+ *out = new(AzureFileVolumeSource)
+ if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AzureFile = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := &in.VsphereVolume, &out.VsphereVolume
+ *out = new(VsphereVirtualDiskVolumeSource)
+ if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error {
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil {
+ return err
+ }
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]api.PersistentVolumeAccessMode, len(*in))
+ for i := range *in {
+ (*out)[i] = api.PersistentVolumeAccessMode((*in)[i])
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.ClaimRef != nil {
+ in, out := &in.ClaimRef, &out.ClaimRef
+ *out = new(api.ObjectReference)
+ if err := Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ClaimRef = nil
+ }
+ out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
+ return nil
+}
+
+func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error {
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Capacity = nil
+ }
+ if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil {
+ return err
+ }
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(*in))
+ for i := range *in {
+ (*out)[i] = PersistentVolumeAccessMode((*in)[i])
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.ClaimRef != nil {
+ in, out := &in.ClaimRef, &out.ClaimRef
+ *out = new(ObjectReference)
+ if err := Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ClaimRef = nil
+ }
+ out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
+ return nil
+}
+
+func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s)
+}
+
+func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error {
+ out.Phase = api.PersistentVolumePhase(in.Phase)
+ out.Message = in.Message
+ out.Reason = in.Reason
+ return nil
+}
+
+func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error {
+ return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s)
+}
+
+func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error {
+ out.Phase = PersistentVolumePhase(in.Phase)
+ out.Message = in.Message
+ out.Reason = in.Reason
+ return nil
+}
+
+func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error {
+ return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s)
+}
+
+func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error {
+ SetDefaults_Pod(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]api.PodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]api.WeightedPodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error {
+ return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s)
+}
+
+func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]WeightedPodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error {
+ return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s)
+}
+
+func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error {
+ out.LabelSelector = in.LabelSelector
+ out.Namespaces = in.Namespaces
+ out.TopologyKey = in.TopologyKey
+ return nil
+}
+
+func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error {
+ return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s)
+}
+
+func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error {
+ out.LabelSelector = in.LabelSelector
+ out.Namespaces = in.Namespaces
+ out.TopologyKey = in.TopologyKey
+ return nil
+}
+
+func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error {
+ return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s)
+}
+
+func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]api.PodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]api.WeightedPodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error {
+ return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s)
+}
+
+func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]WeightedPodAffinityTerm, len(*in))
+ for i := range *in {
+ if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error {
+ return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s)
+}
+
+func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error {
+ SetDefaults_PodAttachOptions(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ return nil
+}
+
+func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error {
+ return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s)
+}
+
+func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ return nil
+}
+
+func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error {
+ return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s)
+}
+
+func autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error {
+ out.Type = api.PodConditionType(in.Type)
+ out.Status = api.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error {
+ return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s)
+}
+
+func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error {
+ out.Type = PodConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error {
+ return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s)
+}
+
+func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error {
+ SetDefaults_PodExecOptions(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ out.Command = in.Command
+ return nil
+}
+
+func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error {
+ return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s)
+}
+
+func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ out.Command = in.Command
+ return nil
+}
+
+func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error {
+ return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s)
+}
+
+func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Pod, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error {
+ return autoConvert_v1_PodList_To_api_PodList(in, out, s)
+}
+
+func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Pod, len(*in))
+ for i := range *in {
+ if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error {
+ return autoConvert_api_PodList_To_v1_PodList(in, out, s)
+}
+
+func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Container = in.Container
+ out.Follow = in.Follow
+ out.Previous = in.Previous
+ out.SinceSeconds = in.SinceSeconds
+ out.SinceTime = in.SinceTime
+ out.Timestamps = in.Timestamps
+ out.TailLines = in.TailLines
+ out.LimitBytes = in.LimitBytes
+ return nil
+}
+
+func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error {
+ return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s)
+}
+
+func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Container = in.Container
+ out.Follow = in.Follow
+ out.Previous = in.Previous
+ out.SinceSeconds = in.SinceSeconds
+ out.SinceTime = in.SinceTime
+ out.Timestamps = in.Timestamps
+ out.TailLines = in.TailLines
+ out.LimitBytes = in.LimitBytes
+ return nil
+}
+
+func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error {
+ return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s)
+}
+
+func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error {
+ return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s)
+}
+
+func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error {
+ return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s)
+}
+
+func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error {
+ if in.SELinuxOptions != nil {
+ in, out := &in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(api.SELinuxOptions)
+ if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ out.RunAsUser = in.RunAsUser
+ out.RunAsNonRoot = in.RunAsNonRoot
+ out.SupplementalGroups = in.SupplementalGroups
+ out.FSGroup = in.FSGroup
+ return nil
+}
+
+func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error {
+ if in.SELinuxOptions != nil {
+ in, out := &in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(SELinuxOptions)
+ if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ out.RunAsUser = in.RunAsUser
+ out.RunAsNonRoot = in.RunAsNonRoot
+ out.SupplementalGroups = in.SupplementalGroups
+ out.FSGroup = in.FSGroup
+ return nil
+}
+
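+// Defaulting (SetDefaults_PodSpec) runs only on this versioned-to-internal path;
+// the internal-to-versioned direction below copies fields without applying defaults.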
+func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error {
+ SetDefaults_PodSpec(in)
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]api.Volume, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Volumes = nil
+ }
+ if in.InitContainers != nil {
+ in, out := &in.InitContainers, &out.InitContainers
+ *out = make([]api.Container, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainers = nil
+ }
+ if in.Containers != nil {
+ in, out := &in.Containers, &out.Containers
+ *out = make([]api.Container, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Containers = nil
+ }
+ out.RestartPolicy = api.RestartPolicy(in.RestartPolicy)
+ out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ out.DNSPolicy = api.DNSPolicy(in.DNSPolicy)
+ out.NodeSelector = in.NodeSelector
+ out.ServiceAccountName = in.ServiceAccountName
+ out.NodeName = in.NodeName
+ if in.SecurityContext != nil {
+ in, out := &in.SecurityContext, &out.SecurityContext
+ *out = new(api.PodSecurityContext)
+ if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]api.LocalObjectReference, len(*in))
+ for i := range *in {
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ out.Hostname = in.Hostname
+ out.Subdomain = in.Subdomain
+ return nil
+}
+
+func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error {
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]Volume, len(*in))
+ for i := range *in {
+ if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Volumes = nil
+ }
+ if in.InitContainers != nil {
+ in, out := &in.InitContainers, &out.InitContainers
+ *out = make([]Container, len(*in))
+ for i := range *in {
+ if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainers = nil
+ }
+ if in.Containers != nil {
+ in, out := &in.Containers, &out.Containers
+ *out = make([]Container, len(*in))
+ for i := range *in {
+ if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Containers = nil
+ }
+ out.RestartPolicy = RestartPolicy(in.RestartPolicy)
+ out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ out.DNSPolicy = DNSPolicy(in.DNSPolicy)
+ out.NodeSelector = in.NodeSelector
+ out.ServiceAccountName = in.ServiceAccountName
+ out.NodeName = in.NodeName
+ if in.SecurityContext != nil {
+ in, out := &in.SecurityContext, &out.SecurityContext
+ *out = new(PodSecurityContext)
+ if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]LocalObjectReference, len(*in))
+ for i := range *in {
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ out.Hostname = in.Hostname
+ out.Subdomain = in.Subdomain
+ return nil
+}
+
+func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error {
+ out.Phase = api.PodPhase(in.Phase)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]api.PodCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PodCondition_To_api_PodCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.Message = in.Message
+ out.Reason = in.Reason
+ out.HostIP = in.HostIP
+ out.PodIP = in.PodIP
+ out.StartTime = in.StartTime
+ if in.InitContainerStatuses != nil {
+ in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+ *out = make([]api.ContainerStatus, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainerStatuses = nil
+ }
+ if in.ContainerStatuses != nil {
+ in, out := &in.ContainerStatuses, &out.ContainerStatuses
+ *out = make([]api.ContainerStatus, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ContainerStatuses = nil
+ }
+ return nil
+}
+
+func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error {
+ return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s)
+}
+
+func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error {
+ out.Phase = PodPhase(in.Phase)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]PodCondition, len(*in))
+ for i := range *in {
+ if err := Convert_api_PodCondition_To_v1_PodCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.Message = in.Message
+ out.Reason = in.Reason
+ out.HostIP = in.HostIP
+ out.PodIP = in.PodIP
+ out.StartTime = in.StartTime
+ if in.InitContainerStatuses != nil {
+ in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+ *out = make([]ContainerStatus, len(*in))
+ for i := range *in {
+ if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainerStatuses = nil
+ }
+ if in.ContainerStatuses != nil {
+ in, out := &in.ContainerStatuses, &out.ContainerStatuses
+ *out = make([]ContainerStatus, len(*in))
+ for i := range *in {
+ if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ContainerStatuses = nil
+ }
+ return nil
+}
+
+func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error {
+ return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s)
+}
+
+func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error {
+ return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s)
+}
+
+func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error {
+ return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s)
+}
+
+func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.PodTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error {
+ return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s)
+}
+
+func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PodTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error {
+ return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s)
+}
+
+func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error {
+ out.UID = in.UID
+ return nil
+}
+
+func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error {
+ return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s)
+}
+
+func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error {
+ out.UID = in.UID
+ return nil
+}
+
+func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error {
+ return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s)
+}
+
+func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error {
+ out.Weight = in.Weight
+ if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error {
+ return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s)
+}
+
+func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error {
+ out.Weight = in.Weight
+ if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error {
+ return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s)
+}
+
+func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error {
+ SetDefaults_Probe(in)
+ if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil {
+ return err
+ }
+ out.InitialDelaySeconds = in.InitialDelaySeconds
+ out.TimeoutSeconds = in.TimeoutSeconds
+ out.PeriodSeconds = in.PeriodSeconds
+ out.SuccessThreshold = in.SuccessThreshold
+ out.FailureThreshold = in.FailureThreshold
+ return nil
+}
+
+func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error {
+ return autoConvert_v1_Probe_To_api_Probe(in, out, s)
+}
+
+func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error {
+ if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil {
+ return err
+ }
+ out.InitialDelaySeconds = in.InitialDelaySeconds
+ out.TimeoutSeconds = in.TimeoutSeconds
+ out.PeriodSeconds = in.PeriodSeconds
+ out.SuccessThreshold = in.SuccessThreshold
+ out.FailureThreshold = in.FailureThreshold
+ return nil
+}
+
+func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error {
+ return autoConvert_api_Probe_To_v1_Probe(in, out, s)
+}
+
+func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
+ SetDefaults_RBDVolumeSource(in)
+ out.CephMonitors = in.CephMonitors
+ out.RBDImage = in.RBDImage
+ out.FSType = in.FSType
+ out.RBDPool = in.RBDPool
+ out.RadosUser = in.RadosUser
+ out.Keyring = in.Keyring
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(api.LocalObjectReference)
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s)
+}
+
+func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
+ out.CephMonitors = in.CephMonitors
+ out.RBDImage = in.RBDImage
+ out.FSType = in.FSType
+ out.RBDPool = in.RBDPool
+ out.RadosUser = in.RadosUser
+ out.Keyring = in.Keyring
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ out.Range = in.Range
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error {
+ return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s)
+}
+
+func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ out.Range = in.Range
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
+ return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s)
+}
+
+func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error {
+ SetDefaults_ReplicationController(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error {
+ return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s)
+}
+
+func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error {
+ return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s)
+}
+
+func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.ReplicationController, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error {
+ return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s)
+}
+
+func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ReplicationController, len(*in))
+ for i := range *in {
+ if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error {
+ return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s)
+}
+
+func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error {
+ return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s)
+}
+
+func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error {
+ return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s)
+}
+
+func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error {
+ out.ContainerName = in.ContainerName
+ out.Resource = in.Resource
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.Divisor, &out.Divisor, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error {
+ return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s)
+}
+
+func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error {
+ out.ContainerName = in.ContainerName
+ out.Resource = in.Resource
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.Divisor, &out.Divisor, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error {
+ return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s)
+}
+
+func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error {
+ return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s)
+}
+
+func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error {
+ return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s)
+}
+
+func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.ResourceQuota, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ResourceQuota_To_api_ResourceQuota(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error {
+ return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s)
+}
+
+func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ResourceQuota, len(*in))
+ for i := range *in {
+ if err := Convert_api_ResourceQuota_To_v1_ResourceQuota(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error {
+ return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s)
+}
+
+func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error {
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Hard, &out.Hard, s); err != nil {
+ return err
+ }
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]api.ResourceQuotaScope, len(*in))
+ for i := range *in {
+ (*out)[i] = api.ResourceQuotaScope((*in)[i])
+ }
+ } else {
+ out.Scopes = nil
+ }
+ return nil
+}
+
+func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error {
+ return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s)
+}
+
+func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error {
+ if in.Hard != nil {
+ in, out := &in.Hard, &out.Hard
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Hard = nil
+ }
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]ResourceQuotaScope, len(*in))
+ for i := range *in {
+ (*out)[i] = ResourceQuotaScope((*in)[i])
+ }
+ } else {
+ out.Scopes = nil
+ }
+ return nil
+}
+
+func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error {
+ return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s)
+}
+
+func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error {
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Hard, &out.Hard, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Used, &out.Used, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error {
+ return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s)
+}
+
+func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error {
+ if in.Hard != nil {
+ in, out := &in.Hard, &out.Hard
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Hard = nil
+ }
+ if in.Used != nil {
+ in, out := &in.Used, &out.Used
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Used = nil
+ }
+ return nil
+}
+
+func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error {
+ return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s)
+}
+
+func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error {
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Limits, &out.Limits, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Requests, &out.Requests, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error {
+ return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s)
+}
+
+func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
+ if in.Limits != nil {
+ in, out := &in.Limits, &out.Limits
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Limits = nil
+ }
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ newVal := new(resource.Quantity)
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[ResourceName(key)] = *newVal
+ }
+ } else {
+ out.Requests = nil
+ }
+ return nil
+}
+
+func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
+ return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s)
+}
+
+func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error {
+ out.User = in.User
+ out.Role = in.Role
+ out.Type = in.Type
+ out.Level = in.Level
+ return nil
+}
+
+func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error {
+ return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s)
+}
+
+func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error {
+ out.User = in.User
+ out.Role = in.Role
+ out.Type = in.Type
+ out.Level = in.Level
+ return nil
+}
+
+func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error {
+ return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
+}
+
+func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error {
+ SetDefaults_Secret(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ out.Data = in.Data
+ out.Type = api.SecretType(in.Type)
+ return nil
+}
+
+func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ out.Data = in.Data
+ out.Type = SecretType(in.Type)
+ return nil
+}
+
+func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error {
+ return autoConvert_api_Secret_To_v1_Secret(in, out, s)
+}
+
+func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error {
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+ return err
+ }
+ out.Key = in.Key
+ return nil
+}
+
+func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error {
+ return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s)
+}
+
+func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error {
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+ return err
+ }
+ out.Key = in.Key
+ return nil
+}
+
+func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error {
+ return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s)
+}
+
+func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Secret, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error {
+ return autoConvert_v1_SecretList_To_api_SecretList(in, out, s)
+}
+
+func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Secret, len(*in))
+ for i := range *in {
+ if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error {
+ return autoConvert_api_SecretList_To_v1_SecretList(in, out, s)
+}
+
+func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
+ out.SecretName = in.SecretName
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.KeyToPath, len(*in))
+ for i := range *in {
+ if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s)
+}
+
+func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error {
+ out.SecretName = in.SecretName
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]KeyToPath, len(*in))
+ for i := range *in {
+ if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error {
+ if in.Capabilities != nil {
+ in, out := &in.Capabilities, &out.Capabilities
+ *out = new(api.Capabilities)
+ if err := Convert_v1_Capabilities_To_api_Capabilities(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Capabilities = nil
+ }
+ out.Privileged = in.Privileged
+ if in.SELinuxOptions != nil {
+ in, out := &in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(api.SELinuxOptions)
+ if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ out.RunAsUser = in.RunAsUser
+ out.RunAsNonRoot = in.RunAsNonRoot
+ out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem
+ return nil
+}
+
+func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error {
+ return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s)
+}
+
+func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error {
+ if in.Capabilities != nil {
+ in, out := &in.Capabilities, &out.Capabilities
+ *out = new(Capabilities)
+ if err := Convert_api_Capabilities_To_v1_Capabilities(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Capabilities = nil
+ }
+ out.Privileged = in.Privileged
+ if in.SELinuxOptions != nil {
+ in, out := &in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(SELinuxOptions)
+ if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ out.RunAsUser = in.RunAsUser
+ out.RunAsNonRoot = in.RunAsNonRoot
+ out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem
+ return nil
+}
+
+func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error {
+ return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s)
+}
+
+func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error {
+ return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s)
+}
+
+func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error {
+ return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s)
+}
+
+func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error {
+ return autoConvert_v1_Service_To_api_Service(in, out, s)
+}
+
+func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error {
+ return autoConvert_api_Service_To_v1_Service(in, out, s)
+}
+
+func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]api.ObjectReference, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ObjectReference_To_api_ObjectReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Secrets = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]api.LocalObjectReference, len(*in))
+ for i := range *in {
+ if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ return nil
+}
+
+func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error {
+ return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s)
+}
+
+func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+ return err
+ }
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]ObjectReference, len(*in))
+ for i := range *in {
+ if err := Convert_api_ObjectReference_To_v1_ObjectReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Secrets = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]LocalObjectReference, len(*in))
+ for i := range *in {
+ if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ return nil
+}
+
+func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error {
+ return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s)
+}
+
+func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.ServiceAccount, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ServiceAccount_To_api_ServiceAccount(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error {
+ return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s)
+}
+
+func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ServiceAccount, len(*in))
+ for i := range *in {
+ if err := Convert_api_ServiceAccount_To_v1_ServiceAccount(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error {
+ return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s)
+}
+
+func autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]api.Service, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error {
+ return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s)
+}
+
+func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Service, len(*in))
+ for i := range *in {
+ if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error {
+ return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s)
+}
+
+func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Protocol = api.Protocol(in.Protocol)
+ out.Port = in.Port
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil {
+ return err
+ }
+ out.NodePort = in.NodePort
+ return nil
+}
+
+func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error {
+ return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s)
+}
+
+func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Protocol = Protocol(in.Protocol)
+ out.Port = in.Port
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil {
+ return err
+ }
+ out.NodePort = in.NodePort
+ return nil
+}
+
+func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error {
+ return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s)
+}
+
+func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error {
+ return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s)
+}
+
+func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Path = in.Path
+ return nil
+}
+
+func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error {
+ return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s)
+}
+
+func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error {
+ SetDefaults_ServiceSpec(in)
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]api.ServicePort, len(*in))
+ for i := range *in {
+ if err := Convert_v1_ServicePort_To_api_ServicePort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ out.Selector = in.Selector
+ out.ClusterIP = in.ClusterIP
+ out.Type = api.ServiceType(in.Type)
+ out.ExternalIPs = in.ExternalIPs
+ out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity)
+ out.LoadBalancerIP = in.LoadBalancerIP
+ out.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges
+ return nil
+}
+
+func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
+ out.Type = ServiceType(in.Type)
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]ServicePort, len(*in))
+ for i := range *in {
+ if err := Convert_api_ServicePort_To_v1_ServicePort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ out.Selector = in.Selector
+ out.ClusterIP = in.ClusterIP
+ out.ExternalIPs = in.ExternalIPs
+ out.LoadBalancerIP = in.LoadBalancerIP
+ out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
+ out.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges
+ return nil
+}
+
+func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error {
+ if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error {
+ return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s)
+}
+
+func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error {
+ if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error {
+ return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s)
+}
+
+func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error {
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error {
+ return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s)
+}
+
+func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error {
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error {
+ return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s)
+}
+
+func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = in.Value
+ out.Effect = api.TaintEffect(in.Effect)
+ return nil
+}
+
+func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error {
+ return autoConvert_v1_Taint_To_api_Taint(in, out, s)
+}
+
+func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Value = in.Value
+ out.Effect = TaintEffect(in.Effect)
+ return nil
+}
+
+func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error {
+ return autoConvert_api_Taint_To_v1_Taint(in, out, s)
+}
+
+func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = api.TolerationOperator(in.Operator)
+ out.Value = in.Value
+ out.Effect = api.TaintEffect(in.Effect)
+ return nil
+}
+
+func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error {
+ return autoConvert_v1_Toleration_To_api_Toleration(in, out, s)
+}
+
+func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = TolerationOperator(in.Operator)
+ out.Value = in.Value
+ out.Effect = TaintEffect(in.Effect)
+ return nil
+}
+
+func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error {
+ return autoConvert_api_Toleration_To_v1_Toleration(in, out, s)
+}
+
+func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error {
+ SetDefaults_Volume(in)
+ out.Name = in.Name
+ if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error {
+ return autoConvert_v1_Volume_To_api_Volume(in, out, s)
+}
+
+func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error {
+ return autoConvert_api_Volume_To_v1_Volume(in, out, s)
+}
+
+func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ReadOnly = in.ReadOnly
+ out.MountPath = in.MountPath
+ out.SubPath = in.SubPath
+ return nil
+}
+
+func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error {
+ return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s)
+}
+
+func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error {
+ out.Name = in.Name
+ out.ReadOnly = in.ReadOnly
+ out.MountPath = in.MountPath
+ out.SubPath = in.SubPath
+ return nil
+}
+
+func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error {
+ return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s)
+}
+
+func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
+ if in.HostPath != nil {
+ in, out := &in.HostPath, &out.HostPath
+ *out = new(api.HostPathVolumeSource)
+ if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HostPath = nil
+ }
+ if in.EmptyDir != nil {
+ in, out := &in.EmptyDir, &out.EmptyDir
+ *out = new(api.EmptyDirVolumeSource)
+ if err := Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.EmptyDir = nil
+ }
+ if in.GCEPersistentDisk != nil {
+ in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(api.GCEPersistentDiskVolumeSource)
+ if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(api.AWSElasticBlockStoreVolumeSource)
+ if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.GitRepo != nil {
+ in, out := &in.GitRepo, &out.GitRepo
+ *out = new(api.GitRepoVolumeSource)
+ if err := Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.GitRepo = nil
+ }
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(api.SecretVolumeSource)
+ if err := Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Secret = nil
+ }
+ if in.NFS != nil {
+ in, out := &in.NFS, &out.NFS
+ *out = new(api.NFSVolumeSource)
+ if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NFS = nil
+ }
+ if in.ISCSI != nil {
+ in, out := &in.ISCSI, &out.ISCSI
+ *out = new(api.ISCSIVolumeSource)
+ if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ISCSI = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := &in.Glusterfs, &out.Glusterfs
+ *out = new(api.GlusterfsVolumeSource)
+ if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.PersistentVolumeClaim != nil {
+ in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+ *out = new(api.PersistentVolumeClaimVolumeSource)
+ if err := Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PersistentVolumeClaim = nil
+ }
+ if in.RBD != nil {
+ in, out := &in.RBD, &out.RBD
+ *out = new(api.RBDVolumeSource)
+ if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := &in.FlexVolume, &out.FlexVolume
+ *out = new(api.FlexVolumeSource)
+ if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.Cinder != nil {
+ in, out := &in.Cinder, &out.Cinder
+ *out = new(api.CinderVolumeSource)
+ if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := &in.CephFS, &out.CephFS
+ *out = new(api.CephFSVolumeSource)
+ if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.Flocker != nil {
+ in, out := &in.Flocker, &out.Flocker
+ *out = new(api.FlockerVolumeSource)
+ if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Flocker = nil
+ }
+ if in.DownwardAPI != nil {
+ in, out := &in.DownwardAPI, &out.DownwardAPI
+ *out = new(api.DownwardAPIVolumeSource)
+ if err := Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.DownwardAPI = nil
+ }
+ if in.FC != nil {
+ in, out := &in.FC, &out.FC
+ *out = new(api.FCVolumeSource)
+ if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.AzureFile != nil {
+ in, out := &in.AzureFile, &out.AzureFile
+ *out = new(api.AzureFileVolumeSource)
+ if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AzureFile = nil
+ }
+ if in.ConfigMap != nil {
+ in, out := &in.ConfigMap, &out.ConfigMap
+ *out = new(api.ConfigMapVolumeSource)
+ if err := Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ConfigMap = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := &in.VsphereVolume, &out.VsphereVolume
+ *out = new(api.VsphereVirtualDiskVolumeSource)
+ if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s)
+}
+
+func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error {
+ if in.HostPath != nil {
+ in, out := &in.HostPath, &out.HostPath
+ *out = new(HostPathVolumeSource)
+ if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HostPath = nil
+ }
+ if in.EmptyDir != nil {
+ in, out := &in.EmptyDir, &out.EmptyDir
+ *out = new(EmptyDirVolumeSource)
+ if err := Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.EmptyDir = nil
+ }
+ if in.GCEPersistentDisk != nil {
+ in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(GCEPersistentDiskVolumeSource)
+ if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.GitRepo != nil {
+ in, out := &in.GitRepo, &out.GitRepo
+ *out = new(GitRepoVolumeSource)
+ if err := Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.GitRepo = nil
+ }
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(SecretVolumeSource)
+ if err := Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Secret = nil
+ }
+ if in.NFS != nil {
+ in, out := &in.NFS, &out.NFS
+ *out = new(NFSVolumeSource)
+ if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NFS = nil
+ }
+ if in.ISCSI != nil {
+ in, out := &in.ISCSI, &out.ISCSI
+ *out = new(ISCSIVolumeSource)
+ if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ISCSI = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := &in.Glusterfs, &out.Glusterfs
+ *out = new(GlusterfsVolumeSource)
+ if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.PersistentVolumeClaim != nil {
+ in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+ *out = new(PersistentVolumeClaimVolumeSource)
+ if err := Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PersistentVolumeClaim = nil
+ }
+ if in.RBD != nil {
+ in, out := &in.RBD, &out.RBD
+ *out = new(RBDVolumeSource)
+ if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := &in.FlexVolume, &out.FlexVolume
+ *out = new(FlexVolumeSource)
+ if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.Cinder != nil {
+ in, out := &in.Cinder, &out.Cinder
+ *out = new(CinderVolumeSource)
+ if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := &in.CephFS, &out.CephFS
+ *out = new(CephFSVolumeSource)
+ if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.Flocker != nil {
+ in, out := &in.Flocker, &out.Flocker
+ *out = new(FlockerVolumeSource)
+ if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Flocker = nil
+ }
+ if in.DownwardAPI != nil {
+ in, out := &in.DownwardAPI, &out.DownwardAPI
+ *out = new(DownwardAPIVolumeSource)
+ if err := Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.DownwardAPI = nil
+ }
+ if in.FC != nil {
+ in, out := &in.FC, &out.FC
+ *out = new(FCVolumeSource)
+ if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.AzureFile != nil {
+ in, out := &in.AzureFile, &out.AzureFile
+ *out = new(AzureFileVolumeSource)
+ if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AzureFile = nil
+ }
+ if in.ConfigMap != nil {
+ in, out := &in.ConfigMap, &out.ConfigMap
+ *out = new(ConfigMapVolumeSource)
+ if err := Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ConfigMap = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := &in.VsphereVolume, &out.VsphereVolume
+ *out = new(VsphereVirtualDiskVolumeSource)
+ if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error {
+ return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s)
+}
+
+func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
+ out.VolumePath = in.VolumePath
+ out.FSType = in.FSType
+ return nil
+}
+
+func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
+ return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
+ out.VolumePath = in.VolumePath
+ out.FSType = in.FSType
+ return nil
+}
+
+func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
+ return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error {
+ out.Weight = int(in.Weight)
+ if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error {
+ return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s)
+}
+
+func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error {
+ out.Weight = int32(in.Weight)
+ if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error {
+ return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s)
+}
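
The generated functions above all follow one pattern: an autoConvert_* body that copies fields one by one, preserving the nil-versus-empty distinction for slices and pointers and delegating to nested converters, plus a thin Convert_* wrapper that callers and the conversion machinery invoke. A minimal standalone sketch of that pattern, using hypothetical widget types rather than the real v1/api structs (the names below are illustrative only and not part of the vendored file):

package main

import "fmt"

// Hypothetical "versioned" and "internal" types, standing in for the
// v1 and api structs converted in the generated file above.
type versionedWidget struct {
	Name  string
	Parts []string
}

type internalWidget struct {
	Name  string
	Parts []string
}

// autoConvertVersionedToInternal copies fields one by one and keeps a nil
// input slice nil on the output, mirroring the generated autoConvert_* bodies.
func autoConvertVersionedToInternal(in *versionedWidget, out *internalWidget) error {
	out.Name = in.Name
	if in.Parts != nil {
		out.Parts = make([]string, len(in.Parts))
		copy(out.Parts, in.Parts)
	} else {
		out.Parts = nil
	}
	return nil
}

// convertVersionedToInternal is the thin public wrapper that simply delegates,
// mirroring the Convert_* / autoConvert_* pairing used throughout the file.
func convertVersionedToInternal(in *versionedWidget, out *internalWidget) error {
	return autoConvertVersionedToInternal(in, out)
}

func main() {
	in := versionedWidget{Name: "demo", Parts: []string{"a", "b"}}
	var out internalWidget
	if err := convertVersionedToInternal(&in, &out); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Printf("%+v\n", out)
}

In the real generated code the wrapper is what gets registered with the conversion scheme, so hand-written conversions can replace an autoConvert_* body without changing call sites.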
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go
new file mode 100644
index 0000000..9054da3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go
@@ -0,0 +1,2924 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ runtime "k8s.io/kubernetes/pkg/runtime"
+ types "k8s.io/kubernetes/pkg/types"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1_AWSElasticBlockStoreVolumeSource,
+ DeepCopy_v1_Affinity,
+ DeepCopy_v1_AttachedVolume,
+ DeepCopy_v1_AzureFileVolumeSource,
+ DeepCopy_v1_Binding,
+ DeepCopy_v1_Capabilities,
+ DeepCopy_v1_CephFSVolumeSource,
+ DeepCopy_v1_CinderVolumeSource,
+ DeepCopy_v1_ComponentCondition,
+ DeepCopy_v1_ComponentStatus,
+ DeepCopy_v1_ComponentStatusList,
+ DeepCopy_v1_ConfigMap,
+ DeepCopy_v1_ConfigMapKeySelector,
+ DeepCopy_v1_ConfigMapList,
+ DeepCopy_v1_ConfigMapVolumeSource,
+ DeepCopy_v1_Container,
+ DeepCopy_v1_ContainerImage,
+ DeepCopy_v1_ContainerPort,
+ DeepCopy_v1_ContainerState,
+ DeepCopy_v1_ContainerStateRunning,
+ DeepCopy_v1_ContainerStateTerminated,
+ DeepCopy_v1_ContainerStateWaiting,
+ DeepCopy_v1_ContainerStatus,
+ DeepCopy_v1_DaemonEndpoint,
+ DeepCopy_v1_DeleteOptions,
+ DeepCopy_v1_DownwardAPIVolumeFile,
+ DeepCopy_v1_DownwardAPIVolumeSource,
+ DeepCopy_v1_EmptyDirVolumeSource,
+ DeepCopy_v1_EndpointAddress,
+ DeepCopy_v1_EndpointPort,
+ DeepCopy_v1_EndpointSubset,
+ DeepCopy_v1_Endpoints,
+ DeepCopy_v1_EndpointsList,
+ DeepCopy_v1_EnvVar,
+ DeepCopy_v1_EnvVarSource,
+ DeepCopy_v1_Event,
+ DeepCopy_v1_EventList,
+ DeepCopy_v1_EventSource,
+ DeepCopy_v1_ExecAction,
+ DeepCopy_v1_ExportOptions,
+ DeepCopy_v1_FCVolumeSource,
+ DeepCopy_v1_FlexVolumeSource,
+ DeepCopy_v1_FlockerVolumeSource,
+ DeepCopy_v1_GCEPersistentDiskVolumeSource,
+ DeepCopy_v1_GitRepoVolumeSource,
+ DeepCopy_v1_GlusterfsVolumeSource,
+ DeepCopy_v1_HTTPGetAction,
+ DeepCopy_v1_HTTPHeader,
+ DeepCopy_v1_Handler,
+ DeepCopy_v1_HostPathVolumeSource,
+ DeepCopy_v1_ISCSIVolumeSource,
+ DeepCopy_v1_KeyToPath,
+ DeepCopy_v1_Lifecycle,
+ DeepCopy_v1_LimitRange,
+ DeepCopy_v1_LimitRangeItem,
+ DeepCopy_v1_LimitRangeList,
+ DeepCopy_v1_LimitRangeSpec,
+ DeepCopy_v1_List,
+ DeepCopy_v1_ListOptions,
+ DeepCopy_v1_LoadBalancerIngress,
+ DeepCopy_v1_LoadBalancerStatus,
+ DeepCopy_v1_LocalObjectReference,
+ DeepCopy_v1_NFSVolumeSource,
+ DeepCopy_v1_Namespace,
+ DeepCopy_v1_NamespaceList,
+ DeepCopy_v1_NamespaceSpec,
+ DeepCopy_v1_NamespaceStatus,
+ DeepCopy_v1_Node,
+ DeepCopy_v1_NodeAddress,
+ DeepCopy_v1_NodeAffinity,
+ DeepCopy_v1_NodeCondition,
+ DeepCopy_v1_NodeDaemonEndpoints,
+ DeepCopy_v1_NodeList,
+ DeepCopy_v1_NodeProxyOptions,
+ DeepCopy_v1_NodeSelector,
+ DeepCopy_v1_NodeSelectorRequirement,
+ DeepCopy_v1_NodeSelectorTerm,
+ DeepCopy_v1_NodeSpec,
+ DeepCopy_v1_NodeStatus,
+ DeepCopy_v1_NodeSystemInfo,
+ DeepCopy_v1_ObjectFieldSelector,
+ DeepCopy_v1_ObjectMeta,
+ DeepCopy_v1_ObjectReference,
+ DeepCopy_v1_OwnerReference,
+ DeepCopy_v1_PersistentVolume,
+ DeepCopy_v1_PersistentVolumeClaim,
+ DeepCopy_v1_PersistentVolumeClaimList,
+ DeepCopy_v1_PersistentVolumeClaimSpec,
+ DeepCopy_v1_PersistentVolumeClaimStatus,
+ DeepCopy_v1_PersistentVolumeClaimVolumeSource,
+ DeepCopy_v1_PersistentVolumeList,
+ DeepCopy_v1_PersistentVolumeSource,
+ DeepCopy_v1_PersistentVolumeSpec,
+ DeepCopy_v1_PersistentVolumeStatus,
+ DeepCopy_v1_Pod,
+ DeepCopy_v1_PodAffinity,
+ DeepCopy_v1_PodAffinityTerm,
+ DeepCopy_v1_PodAntiAffinity,
+ DeepCopy_v1_PodAttachOptions,
+ DeepCopy_v1_PodCondition,
+ DeepCopy_v1_PodExecOptions,
+ DeepCopy_v1_PodList,
+ DeepCopy_v1_PodLogOptions,
+ DeepCopy_v1_PodProxyOptions,
+ DeepCopy_v1_PodSecurityContext,
+ DeepCopy_v1_PodSpec,
+ DeepCopy_v1_PodStatus,
+ DeepCopy_v1_PodStatusResult,
+ DeepCopy_v1_PodTemplate,
+ DeepCopy_v1_PodTemplateList,
+ DeepCopy_v1_PodTemplateSpec,
+ DeepCopy_v1_Preconditions,
+ DeepCopy_v1_PreferredSchedulingTerm,
+ DeepCopy_v1_Probe,
+ DeepCopy_v1_RBDVolumeSource,
+ DeepCopy_v1_RangeAllocation,
+ DeepCopy_v1_ReplicationController,
+ DeepCopy_v1_ReplicationControllerList,
+ DeepCopy_v1_ReplicationControllerSpec,
+ DeepCopy_v1_ReplicationControllerStatus,
+ DeepCopy_v1_ResourceFieldSelector,
+ DeepCopy_v1_ResourceQuota,
+ DeepCopy_v1_ResourceQuotaList,
+ DeepCopy_v1_ResourceQuotaSpec,
+ DeepCopy_v1_ResourceQuotaStatus,
+ DeepCopy_v1_ResourceRequirements,
+ DeepCopy_v1_SELinuxOptions,
+ DeepCopy_v1_Secret,
+ DeepCopy_v1_SecretKeySelector,
+ DeepCopy_v1_SecretList,
+ DeepCopy_v1_SecretVolumeSource,
+ DeepCopy_v1_SecurityContext,
+ DeepCopy_v1_SerializedReference,
+ DeepCopy_v1_Service,
+ DeepCopy_v1_ServiceAccount,
+ DeepCopy_v1_ServiceAccountList,
+ DeepCopy_v1_ServiceList,
+ DeepCopy_v1_ServicePort,
+ DeepCopy_v1_ServiceProxyOptions,
+ DeepCopy_v1_ServiceSpec,
+ DeepCopy_v1_ServiceStatus,
+ DeepCopy_v1_TCPSocketAction,
+ DeepCopy_v1_Taint,
+ DeepCopy_v1_Toleration,
+ DeepCopy_v1_Volume,
+ DeepCopy_v1_VolumeMount,
+ DeepCopy_v1_VolumeSource,
+ DeepCopy_v1_VsphereVirtualDiskVolumeSource,
+ DeepCopy_v1_WeightedPodAffinityTerm,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) error {
+ if in.NodeAffinity != nil {
+ in, out := in.NodeAffinity, &out.NodeAffinity
+ *out = new(NodeAffinity)
+ if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.NodeAffinity = nil
+ }
+ if in.PodAffinity != nil {
+ in, out := in.PodAffinity, &out.PodAffinity
+ *out = new(PodAffinity)
+ if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PodAffinity = nil
+ }
+ if in.PodAntiAffinity != nil {
+ in, out := in.PodAntiAffinity, &out.PodAntiAffinity
+ *out = new(PodAntiAffinity)
+ if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PodAntiAffinity = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_AttachedVolume(in AttachedVolume, out *AttachedVolume, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.DevicePath = in.DevicePath
+ return nil
+}
+
+func DeepCopy_v1_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error {
+ out.SecretName = in.SecretName
+ out.ShareName = in.ShareName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_Binding(in Binding, out *Binding, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Target = in.Target
+ return nil
+}
+
+func DeepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion.Cloner) error {
+ if in.Add != nil {
+ in, out := in.Add, &out.Add
+ *out = make([]Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Add = nil
+ }
+ if in.Drop != nil {
+ in, out := in.Drop, &out.Drop
+ *out = make([]Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Drop = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error {
+ if in.Monitors != nil {
+ in, out := in.Monitors, &out.Monitors
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Monitors = nil
+ }
+ out.Path = in.Path
+ out.User = in.User
+ out.SecretFile = in.SecretFile
+ if in.SecretRef != nil {
+ in, out := in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = *in
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.Message = in.Message
+ out.Error = in.Error
+ return nil
+}
+
+func DeepCopy_v1_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]ComponentCondition, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Conditions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ComponentStatusList(in ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ComponentStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ComponentStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Data = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKeySelector, c *conversion.Cloner) error {
+ out.LocalObjectReference = in.LocalObjectReference
+ out.Key = in.Key
+ return nil
+}
+
+func DeepCopy_v1_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ConfigMap, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ConfigMap(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapVolumeSource, c *conversion.Cloner) error {
+ out.LocalObjectReference = in.LocalObjectReference
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]KeyToPath, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Image = in.Image
+ if in.Command != nil {
+ in, out := in.Command, &out.Command
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Command = nil
+ }
+ if in.Args != nil {
+ in, out := in.Args, &out.Args
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Args = nil
+ }
+ out.WorkingDir = in.WorkingDir
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]ContainerPort, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.Env != nil {
+ in, out := in.Env, &out.Env
+ *out = make([]EnvVar, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_EnvVar(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Env = nil
+ }
+ if err := DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
+ return err
+ }
+ if in.VolumeMounts != nil {
+ in, out := in.VolumeMounts, &out.VolumeMounts
+ *out = make([]VolumeMount, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.VolumeMounts = nil
+ }
+ if in.LivenessProbe != nil {
+ in, out := in.LivenessProbe, &out.LivenessProbe
+ *out = new(Probe)
+ if err := DeepCopy_v1_Probe(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.LivenessProbe = nil
+ }
+ if in.ReadinessProbe != nil {
+ in, out := in.ReadinessProbe, &out.ReadinessProbe
+ *out = new(Probe)
+ if err := DeepCopy_v1_Probe(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ReadinessProbe = nil
+ }
+ if in.Lifecycle != nil {
+ in, out := in.Lifecycle, &out.Lifecycle
+ *out = new(Lifecycle)
+ if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Lifecycle = nil
+ }
+ out.TerminationMessagePath = in.TerminationMessagePath
+ out.ImagePullPolicy = in.ImagePullPolicy
+ if in.SecurityContext != nil {
+ in, out := in.SecurityContext, &out.SecurityContext
+ *out = new(SecurityContext)
+ if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ out.Stdin = in.Stdin
+ out.StdinOnce = in.StdinOnce
+ out.TTY = in.TTY
+ return nil
+}
+
+func DeepCopy_v1_ContainerImage(in ContainerImage, out *ContainerImage, c *conversion.Cloner) error {
+ if in.Names != nil {
+ in, out := in.Names, &out.Names
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Names = nil
+ }
+ out.SizeBytes = in.SizeBytes
+ return nil
+}
+
+func DeepCopy_v1_ContainerPort(in ContainerPort, out *ContainerPort, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.HostPort = in.HostPort
+ out.ContainerPort = in.ContainerPort
+ out.Protocol = in.Protocol
+ out.HostIP = in.HostIP
+ return nil
+}
+
+func DeepCopy_v1_ContainerState(in ContainerState, out *ContainerState, c *conversion.Cloner) error {
+ if in.Waiting != nil {
+ in, out := in.Waiting, &out.Waiting
+ *out = new(ContainerStateWaiting)
+ **out = *in
+ } else {
+ out.Waiting = nil
+ }
+ if in.Running != nil {
+ in, out := in.Running, &out.Running
+ *out = new(ContainerStateRunning)
+ if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Running = nil
+ }
+ if in.Terminated != nil {
+ in, out := in.Terminated, &out.Terminated
+ *out = new(ContainerStateTerminated)
+ if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Terminated = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error {
+ out.StartedAt = in.StartedAt.DeepCopy()
+ return nil
+}
+
+func DeepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *ContainerStateTerminated, c *conversion.Cloner) error {
+ out.ExitCode = in.ExitCode
+ out.Signal = in.Signal
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.StartedAt = in.StartedAt.DeepCopy()
+ out.FinishedAt = in.FinishedAt.DeepCopy()
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func DeepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_v1_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *conversion.Cloner) error {
+ out.Name = in.Name
+ if err := DeepCopy_v1_ContainerState(in.State, &out.State, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_ContainerState(in.LastTerminationState, &out.LastTerminationState, c); err != nil {
+ return err
+ }
+ out.Ready = in.Ready
+ out.RestartCount = in.RestartCount
+ out.Image = in.Image
+ out.ImageID = in.ImageID
+ out.ContainerID = in.ContainerID
+ return nil
+}
+
+func DeepCopy_v1_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conversion.Cloner) error {
+ out.Port = in.Port
+ return nil
+}
+
+func DeepCopy_v1_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if in.GracePeriodSeconds != nil {
+ in, out := in.GracePeriodSeconds, &out.GracePeriodSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.GracePeriodSeconds = nil
+ }
+ if in.Preconditions != nil {
+ in, out := in.Preconditions, &out.Preconditions
+ *out = new(Preconditions)
+ if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Preconditions = nil
+ }
+ if in.OrphanDependents != nil {
+ in, out := in.OrphanDependents, &out.OrphanDependents
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.OrphanDependents = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error {
+ out.Path = in.Path
+ if in.FieldRef != nil {
+ in, out := in.FieldRef, &out.FieldRef
+ *out = new(ObjectFieldSelector)
+ **out = *in
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(ResourceFieldSelector)
+ if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, c *conversion.Cloner) error {
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]DownwardAPIVolumeFile, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_DownwardAPIVolumeFile(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVolumeSource, c *conversion.Cloner) error {
+ out.Medium = in.Medium
+ return nil
+}
+
+func DeepCopy_v1_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ if in.TargetRef != nil {
+ in, out := in.TargetRef, &out.TargetRef
+ *out = new(ObjectReference)
+ **out = *in
+ } else {
+ out.TargetRef = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_EndpointPort(in EndpointPort, out *EndpointPort, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Port = in.Port
+ out.Protocol = in.Protocol
+ return nil
+}
+
+func DeepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conversion.Cloner) error {
+ if in.Addresses != nil {
+ in, out := in.Addresses, &out.Addresses
+ *out = make([]EndpointAddress, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_EndpointAddress(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Addresses = nil
+ }
+ if in.NotReadyAddresses != nil {
+ in, out := in.NotReadyAddresses, &out.NotReadyAddresses
+ *out = make([]EndpointAddress, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_EndpointAddress(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NotReadyAddresses = nil
+ }
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]EndpointPort, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ports = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Subsets != nil {
+ in, out := in.Subsets, &out.Subsets
+ *out = make([]EndpointSubset, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_EndpointSubset(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subsets = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Endpoints, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Endpoints(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ if in.ValueFrom != nil {
+ in, out := in.ValueFrom, &out.ValueFrom
+ *out = new(EnvVarSource)
+ if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ValueFrom = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion.Cloner) error {
+ if in.FieldRef != nil {
+ in, out := in.FieldRef, &out.FieldRef
+ *out = new(ObjectFieldSelector)
+ **out = *in
+ } else {
+ out.FieldRef = nil
+ }
+ if in.ResourceFieldRef != nil {
+ in, out := in.ResourceFieldRef, &out.ResourceFieldRef
+ *out = new(ResourceFieldSelector)
+ if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceFieldRef = nil
+ }
+ if in.ConfigMapKeyRef != nil {
+ in, out := in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+ *out = new(ConfigMapKeySelector)
+ **out = *in
+ } else {
+ out.ConfigMapKeyRef = nil
+ }
+ if in.SecretKeyRef != nil {
+ in, out := in.SecretKeyRef, &out.SecretKeyRef
+ *out = new(SecretKeySelector)
+ **out = *in
+ } else {
+ out.SecretKeyRef = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.InvolvedObject = in.InvolvedObject
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.Source = in.Source
+ out.FirstTimestamp = in.FirstTimestamp.DeepCopy()
+ out.LastTimestamp = in.LastTimestamp.DeepCopy()
+ out.Count = in.Count
+ out.Type = in.Type
+ return nil
+}
+
+func DeepCopy_v1_EventList(in EventList, out *EventList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Event, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Event(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_EventSource(in EventSource, out *EventSource, c *conversion.Cloner) error {
+ out.Component = in.Component
+ out.Host = in.Host
+ return nil
+}
+
+func DeepCopy_v1_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner) error {
+ if in.Command != nil {
+ in, out := in.Command, &out.Command
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Command = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func DeepCopy_v1_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error {
+ if in.TargetWWNs != nil {
+ in, out := in.TargetWWNs, &out.TargetWWNs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.TargetWWNs = nil
+ }
+ if in.Lun != nil {
+ in, out := in.Lun, &out.Lun
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Lun = nil
+ }
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c *conversion.Cloner) error {
+ out.Driver = in.Driver
+ out.FSType = in.FSType
+ if in.SecretRef != nil {
+ in, out := in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = *in
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ if in.Options != nil {
+ in, out := in.Options, &out.Options
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Options = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_FlockerVolumeSource(in FlockerVolumeSource, out *FlockerVolumeSource, c *conversion.Cloner) error {
+ out.DatasetName = in.DatasetName
+ return nil
+}
+
+func DeepCopy_v1_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
+ out.PDName = in.PDName
+ out.FSType = in.FSType
+ out.Partition = in.Partition
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_GitRepoVolumeSource(in GitRepoVolumeSource, out *GitRepoVolumeSource, c *conversion.Cloner) error {
+ out.Repository = in.Repository
+ out.Revision = in.Revision
+ out.Directory = in.Directory
+ return nil
+}
+
+func DeepCopy_v1_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *GlusterfsVolumeSource, c *conversion.Cloner) error {
+ out.EndpointsName = in.EndpointsName
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error {
+ out.Path = in.Path
+ out.Port = in.Port
+ out.Host = in.Host
+ out.Scheme = in.Scheme
+ if in.HTTPHeaders != nil {
+ in, out := in.HTTPHeaders, &out.HTTPHeaders
+ *out = make([]HTTPHeader, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.HTTPHeaders = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_HTTPHeader(in HTTPHeader, out *HTTPHeader, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Value = in.Value
+ return nil
+}
+
+func DeepCopy_v1_Handler(in Handler, out *Handler, c *conversion.Cloner) error {
+ if in.Exec != nil {
+ in, out := in.Exec, &out.Exec
+ *out = new(ExecAction)
+ if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Exec = nil
+ }
+ if in.HTTPGet != nil {
+ in, out := in.HTTPGet, &out.HTTPGet
+ *out = new(HTTPGetAction)
+ if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.HTTPGet = nil
+ }
+ if in.TCPSocket != nil {
+ in, out := in.TCPSocket, &out.TCPSocket
+ *out = new(TCPSocketAction)
+ **out = *in
+ } else {
+ out.TCPSocket = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_HostPathVolumeSource(in HostPathVolumeSource, out *HostPathVolumeSource, c *conversion.Cloner) error {
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_v1_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, c *conversion.Cloner) error {
+ out.TargetPortal = in.TargetPortal
+ out.IQN = in.IQN
+ out.Lun = in.Lun
+ out.ISCSIInterface = in.ISCSIInterface
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_KeyToPath(in KeyToPath, out *KeyToPath, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_v1_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) error {
+ if in.PostStart != nil {
+ in, out := in.PostStart, &out.PostStart
+ *out = new(Handler)
+ if err := DeepCopy_v1_Handler(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PostStart = nil
+ }
+ if in.PreStop != nil {
+ in, out := in.PreStop, &out.PreStop
+ *out = new(Handler)
+ if err := DeepCopy_v1_Handler(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PreStop = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_LimitRangeSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error {
+ out.Type = in.Type
+ if in.Max != nil {
+ in, out := in.Max, &out.Max
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Max = nil
+ }
+ if in.Min != nil {
+ in, out := in.Min, &out.Min
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Min = nil
+ }
+ if in.Default != nil {
+ in, out := in.Default, &out.Default
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Default = nil
+ }
+ if in.DefaultRequest != nil {
+ in, out := in.DefaultRequest, &out.DefaultRequest
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.DefaultRequest = nil
+ }
+ if in.MaxLimitRequestRatio != nil {
+ in, out := in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.MaxLimitRequestRatio = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]LimitRange, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_LimitRange(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conversion.Cloner) error {
+ if in.Limits != nil {
+ in, out := in.Limits, &out.Limits
+ *out = make([]LimitRangeItem, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_LimitRangeItem(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Limits = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_List(in List, out *List, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]runtime.RawExtension, len(in))
+ for i := range in {
+ if err := runtime.DeepCopy_runtime_RawExtension(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.LabelSelector = in.LabelSelector
+ out.FieldSelector = in.FieldSelector
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ if in.TimeoutSeconds != nil {
+ in, out := in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TimeoutSeconds = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_LoadBalancerIngress(in LoadBalancerIngress, out *LoadBalancerIngress, c *conversion.Cloner) error {
+ out.IP = in.IP
+ out.Hostname = in.Hostname
+ return nil
+}
+
+func DeepCopy_v1_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStatus, c *conversion.Cloner) error {
+ if in.Ingress != nil {
+ in, out := in.Ingress, &out.Ingress
+ *out = make([]LoadBalancerIngress, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_LocalObjectReference(in LocalObjectReference, out *LocalObjectReference, c *conversion.Cloner) error {
+ out.Name = in.Name
+ return nil
+}
+
+func DeepCopy_v1_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *conversion.Cloner) error {
+ out.Server = in.Server
+ out.Path = in.Path
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_NamespaceSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Namespace, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Namespace(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversion.Cloner) error {
+ if in.Finalizers != nil {
+ in, out := in.Finalizers, &out.Finalizers
+ *out = make([]FinalizerName, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Finalizers = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ return nil
+}
+
+func DeepCopy_v1_Node(in Node, out *Node, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Spec = in.Spec
+ if err := DeepCopy_v1_NodeStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Address = in.Address
+ return nil
+}
+
+func DeepCopy_v1_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion.Cloner) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = new(NodeSelector)
+ if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PreferredSchedulingTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PreferredSchedulingTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_v1_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEndpoints, c *conversion.Cloner) error {
+ out.KubeletEndpoint = in.KubeletEndpoint
+ return nil
+}
+
+func DeepCopy_v1_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Node, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Node(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_v1_NodeSelector(in NodeSelector, out *NodeSelector, c *conversion.Cloner) error {
+ if in.NodeSelectorTerms != nil {
+ in, out := in.NodeSelectorTerms, &out.NodeSelectorTerms
+ *out = make([]NodeSelectorTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_NodeSelectorTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.NodeSelectorTerms = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeSelectorRequirement(in NodeSelectorRequirement, out *NodeSelectorRequirement, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ if in.Values != nil {
+ in, out := in.Values, &out.Values
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Values = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeSelectorTerm(in NodeSelectorTerm, out *NodeSelectorTerm, c *conversion.Cloner) error {
+ if in.MatchExpressions != nil {
+ in, out := in.MatchExpressions, &out.MatchExpressions
+ *out = make([]NodeSelectorRequirement, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_NodeSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error {
+ out.PodCIDR = in.PodCIDR
+ out.ExternalID = in.ExternalID
+ out.ProviderID = in.ProviderID
+ out.Unschedulable = in.Unschedulable
+ return nil
+}
+
+func DeepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error {
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ if in.Allocatable != nil {
+ in, out := in.Allocatable, &out.Allocatable
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Allocatable = nil
+ }
+ out.Phase = in.Phase
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]NodeCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_NodeCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.Addresses != nil {
+ in, out := in.Addresses, &out.Addresses
+ *out = make([]NodeAddress, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Addresses = nil
+ }
+ out.DaemonEndpoints = in.DaemonEndpoints
+ out.NodeInfo = in.NodeInfo
+ if in.Images != nil {
+ in, out := in.Images, &out.Images
+ *out = make([]ContainerImage, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ContainerImage(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Images = nil
+ }
+ if in.VolumesInUse != nil {
+ in, out := in.VolumesInUse, &out.VolumesInUse
+ *out = make([]UniqueVolumeName, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.VolumesInUse = nil
+ }
+ if in.VolumesAttached != nil {
+ in, out := in.VolumesAttached, &out.VolumesAttached
+ *out = make([]AttachedVolume, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.VolumesAttached = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conversion.Cloner) error {
+ out.MachineID = in.MachineID
+ out.SystemUUID = in.SystemUUID
+ out.BootID = in.BootID
+ out.KernelVersion = in.KernelVersion
+ out.OSImage = in.OSImage
+ out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
+ out.KubeletVersion = in.KubeletVersion
+ out.KubeProxyVersion = in.KubeProxyVersion
+ out.OperatingSystem = in.OperatingSystem
+ out.Architecture = in.Architecture
+ return nil
+}
+
+func DeepCopy_v1_ObjectFieldSelector(in ObjectFieldSelector, out *ObjectFieldSelector, c *conversion.Cloner) error {
+ out.APIVersion = in.APIVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func DeepCopy_v1_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.GenerateName = in.GenerateName
+ out.Namespace = in.Namespace
+ out.SelfLink = in.SelfLink
+ out.UID = in.UID
+ out.ResourceVersion = in.ResourceVersion
+ out.Generation = in.Generation
+ out.CreationTimestamp = in.CreationTimestamp.DeepCopy()
+ if in.DeletionTimestamp != nil {
+ in, out := in.DeletionTimestamp, &out.DeletionTimestamp
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.DeletionTimestamp = nil
+ }
+ if in.DeletionGracePeriodSeconds != nil {
+ in, out := in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.DeletionGracePeriodSeconds = nil
+ }
+ if in.Labels != nil {
+ in, out := in.Labels, &out.Labels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Labels = nil
+ }
+ if in.Annotations != nil {
+ in, out := in.Annotations, &out.Annotations
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Annotations = nil
+ }
+ if in.OwnerReferences != nil {
+ in, out := in.OwnerReferences, &out.OwnerReferences
+ *out = make([]OwnerReference, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_OwnerReference(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.OwnerReferences = nil
+ }
+ if in.Finalizers != nil {
+ in, out := in.Finalizers, &out.Finalizers
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Finalizers = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ObjectReference(in ObjectReference, out *ObjectReference, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ out.UID = in.UID
+ out.APIVersion = in.APIVersion
+ out.ResourceVersion = in.ResourceVersion
+ out.FieldPath = in.FieldPath
+ return nil
+}
+
+func DeepCopy_v1_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error {
+ out.APIVersion = in.APIVersion
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.UID = in.UID
+ if in.Controller != nil {
+ in, out := in.Controller, &out.Controller
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.Controller = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PersistentVolumeSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PersistentVolumeClaimSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PersistentVolumeClaimStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PersistentVolumeClaim, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeClaimSpec(in PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, c *conversion.Cloner) error {
+ if in.AccessModes != nil {
+ in, out := in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
+ return err
+ }
+ out.VolumeName = in.VolumeName
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ if in.AccessModes != nil {
+ in, out := in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error {
+ out.ClaimName = in.ClaimName
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PersistentVolume, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PersistentVolume(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *PersistentVolumeSource, c *conversion.Cloner) error {
+ if in.GCEPersistentDisk != nil {
+ in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(GCEPersistentDiskVolumeSource)
+ **out = *in
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ **out = *in
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.HostPath != nil {
+ in, out := in.HostPath, &out.HostPath
+ *out = new(HostPathVolumeSource)
+ **out = *in
+ } else {
+ out.HostPath = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := in.Glusterfs, &out.Glusterfs
+ *out = new(GlusterfsVolumeSource)
+ **out = *in
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.NFS != nil {
+ in, out := in.NFS, &out.NFS
+ *out = new(NFSVolumeSource)
+ **out = *in
+ } else {
+ out.NFS = nil
+ }
+ if in.RBD != nil {
+ in, out := in.RBD, &out.RBD
+ *out = new(RBDVolumeSource)
+ if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.ISCSI != nil {
+ in, out := in.ISCSI, &out.ISCSI
+ *out = new(ISCSIVolumeSource)
+ **out = *in
+ } else {
+ out.ISCSI = nil
+ }
+ if in.Cinder != nil {
+ in, out := in.Cinder, &out.Cinder
+ *out = new(CinderVolumeSource)
+ **out = *in
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := in.CephFS, &out.CephFS
+ *out = new(CephFSVolumeSource)
+ if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.FC != nil {
+ in, out := in.FC, &out.FC
+ *out = new(FCVolumeSource)
+ if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.Flocker != nil {
+ in, out := in.Flocker, &out.Flocker
+ *out = new(FlockerVolumeSource)
+ **out = *in
+ } else {
+ out.Flocker = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := in.FlexVolume, &out.FlexVolume
+ *out = new(FlexVolumeSource)
+ if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.AzureFile != nil {
+ in, out := in.AzureFile, &out.AzureFile
+ *out = new(AzureFileVolumeSource)
+ **out = *in
+ } else {
+ out.AzureFile = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := in.VsphereVolume, &out.VsphereVolume
+ *out = new(VsphereVirtualDiskVolumeSource)
+ **out = *in
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error {
+ if in.Capacity != nil {
+ in, out := in.Capacity, &out.Capacity
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Capacity = nil
+ }
+ if err := DeepCopy_v1_PersistentVolumeSource(in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil {
+ return err
+ }
+ if in.AccessModes != nil {
+ in, out := in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AccessModes = nil
+ }
+ if in.ClaimRef != nil {
+ in, out := in.ClaimRef, &out.ClaimRef
+ *out = new(ObjectReference)
+ **out = *in
+ } else {
+ out.ClaimRef = nil
+ }
+ out.PersistentVolumeReclaimPolicy = in.PersistentVolumeReclaimPolicy
+ return nil
+}
+
+func DeepCopy_v1_PersistentVolumeStatus(in PersistentVolumeStatus, out *PersistentVolumeStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ out.Message = in.Message
+ out.Reason = in.Reason
+ return nil
+}
+
+func DeepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]WeightedPodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error {
+ if in.LabelSelector != nil {
+ in, out := in.LabelSelector, &out.LabelSelector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.LabelSelector = nil
+ }
+ if in.Namespaces != nil {
+ in, out := in.Namespaces, &out.Namespaces
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Namespaces = nil
+ }
+ out.TopologyKey = in.TopologyKey
+ return nil
+}
+
+func DeepCopy_v1_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error {
+ if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+ *out = make([]PodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+ *out = make([]WeightedPodAffinityTerm, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ return nil
+}
+
+func DeepCopy_v1_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastProbeTime = in.LastProbeTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_v1_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Stdin = in.Stdin
+ out.Stdout = in.Stdout
+ out.Stderr = in.Stderr
+ out.TTY = in.TTY
+ out.Container = in.Container
+ if in.Command != nil {
+ in, out := in.Command, &out.Command
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Command = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodList(in PodList, out *PodList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Pod, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Pod(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Container = in.Container
+ out.Follow = in.Follow
+ out.Previous = in.Previous
+ if in.SinceSeconds != nil {
+ in, out := in.SinceSeconds, &out.SinceSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.SinceSeconds = nil
+ }
+ if in.SinceTime != nil {
+ in, out := in.SinceTime, &out.SinceTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.SinceTime = nil
+ }
+ out.Timestamps = in.Timestamps
+ if in.TailLines != nil {
+ in, out := in.TailLines, &out.TailLines
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TailLines = nil
+ }
+ if in.LimitBytes != nil {
+ in, out := in.LimitBytes, &out.LimitBytes
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.LimitBytes = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_v1_PodSecurityContext(in PodSecurityContext, out *PodSecurityContext, c *conversion.Cloner) error {
+ if in.SELinuxOptions != nil {
+ in, out := in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(SELinuxOptions)
+ **out = *in
+ } else {
+ out.SELinuxOptions = nil
+ }
+ if in.RunAsUser != nil {
+ in, out := in.RunAsUser, &out.RunAsUser
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.RunAsUser = nil
+ }
+ if in.RunAsNonRoot != nil {
+ in, out := in.RunAsNonRoot, &out.RunAsNonRoot
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.RunAsNonRoot = nil
+ }
+ if in.SupplementalGroups != nil {
+ in, out := in.SupplementalGroups, &out.SupplementalGroups
+ *out = make([]int64, len(in))
+ copy(*out, in)
+ } else {
+ out.SupplementalGroups = nil
+ }
+ if in.FSGroup != nil {
+ in, out := in.FSGroup, &out.FSGroup
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.FSGroup = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error {
+ if in.Volumes != nil {
+ in, out := in.Volumes, &out.Volumes
+ *out = make([]Volume, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Volume(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Volumes = nil
+ }
+ if in.InitContainers != nil {
+ in, out := in.InitContainers, &out.InitContainers
+ *out = make([]Container, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainers = nil
+ }
+ if in.Containers != nil {
+ in, out := in.Containers, &out.Containers
+ *out = make([]Container, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Containers = nil
+ }
+ out.RestartPolicy = in.RestartPolicy
+ if in.TerminationGracePeriodSeconds != nil {
+ in, out := in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TerminationGracePeriodSeconds = nil
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ActiveDeadlineSeconds = nil
+ }
+ out.DNSPolicy = in.DNSPolicy
+ if in.NodeSelector != nil {
+ in, out := in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.NodeSelector = nil
+ }
+ out.ServiceAccountName = in.ServiceAccountName
+ out.DeprecatedServiceAccount = in.DeprecatedServiceAccount
+ out.NodeName = in.NodeName
+ out.HostNetwork = in.HostNetwork
+ out.HostPID = in.HostPID
+ out.HostIPC = in.HostIPC
+ if in.SecurityContext != nil {
+ in, out := in.SecurityContext, &out.SecurityContext
+ *out = new(PodSecurityContext)
+ if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.SecurityContext = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]LocalObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ out.Hostname = in.Hostname
+ out.Subdomain = in.Subdomain
+ return nil
+}
+
+func DeepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) error {
+ out.Phase = in.Phase
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]PodCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PodCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.Message = in.Message
+ out.Reason = in.Reason
+ out.HostIP = in.HostIP
+ out.PodIP = in.PodIP
+ if in.StartTime != nil {
+ in, out := in.StartTime, &out.StartTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.StartTime = nil
+ }
+ if in.InitContainerStatuses != nil {
+ in, out := in.InitContainerStatuses, &out.InitContainerStatuses
+ *out = make([]ContainerStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.InitContainerStatuses = nil
+ }
+ if in.ContainerStatuses != nil {
+ in, out := in.ContainerStatuses, &out.ContainerStatuses
+ *out = make([]ContainerStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.ContainerStatuses = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PodTemplate, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_PodTemplate(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *conversion.Cloner) error {
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_Preconditions(in Preconditions, out *Preconditions, c *conversion.Cloner) error {
+ if in.UID != nil {
+ in, out := in.UID, &out.UID
+ *out = new(types.UID)
+ **out = *in
+ } else {
+ out.UID = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error {
+ out.Weight = in.Weight
+ if err := DeepCopy_v1_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_Probe(in Probe, out *Probe, c *conversion.Cloner) error {
+ if err := DeepCopy_v1_Handler(in.Handler, &out.Handler, c); err != nil {
+ return err
+ }
+ out.InitialDelaySeconds = in.InitialDelaySeconds
+ out.TimeoutSeconds = in.TimeoutSeconds
+ out.PeriodSeconds = in.PeriodSeconds
+ out.SuccessThreshold = in.SuccessThreshold
+ out.FailureThreshold = in.FailureThreshold
+ return nil
+}
+
+func DeepCopy_v1_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *conversion.Cloner) error {
+ if in.CephMonitors != nil {
+ in, out := in.CephMonitors, &out.CephMonitors
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.CephMonitors = nil
+ }
+ out.RBDImage = in.RBDImage
+ out.FSType = in.FSType
+ out.RBDPool = in.RBDPool
+ out.RadosUser = in.RadosUser
+ out.Keyring = in.Keyring
+ if in.SecretRef != nil {
+ in, out := in.SecretRef, &out.SecretRef
+ *out = new(LocalObjectReference)
+ **out = *in
+ } else {
+ out.SecretRef = nil
+ }
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
+func DeepCopy_v1_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Range = in.Range
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Data = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_ReplicationControllerSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ReplicationController, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ReplicationController(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *ReplicationControllerSpec, c *conversion.Cloner) error {
+ if in.Replicas != nil {
+ in, out := in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Replicas = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.Template != nil {
+ in, out := in.Template, &out.Template
+ *out = new(PodTemplateSpec)
+ if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Template = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func DeepCopy_v1_ResourceFieldSelector(in ResourceFieldSelector, out *ResourceFieldSelector, c *conversion.Cloner) error {
+ out.ContainerName = in.ContainerName
+ out.Resource = in.Resource
+ out.Divisor = in.Divisor.DeepCopy()
+ return nil
+}
+
+func DeepCopy_v1_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_ResourceQuotaSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_ResourceQuotaStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ResourceQuota, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ResourceQuota(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error {
+ if in.Hard != nil {
+ in, out := in.Hard, &out.Hard
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Hard = nil
+ }
+ if in.Scopes != nil {
+ in, out := in.Scopes, &out.Scopes
+ *out = make([]ResourceQuotaScope, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Scopes = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error {
+ if in.Hard != nil {
+ in, out := in.Hard, &out.Hard
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Hard = nil
+ }
+ if in.Used != nil {
+ in, out := in.Used, &out.Used
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Used = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error {
+ if in.Limits != nil {
+ in, out := in.Limits, &out.Limits
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Limits = nil
+ }
+ if in.Requests != nil {
+ in, out := in.Requests, &out.Requests
+ *out = make(ResourceList)
+ for key, val := range in {
+ (*out)[key] = val.DeepCopy()
+ }
+ } else {
+ out.Requests = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conversion.Cloner) error {
+ out.User = in.User
+ out.Role = in.Role
+ out.Type = in.Type
+ out.Level = in.Level
+ return nil
+}
+
+func DeepCopy_v1_Secret(in Secret, out *Secret, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make(map[string][]byte)
+ for key, val := range in {
+ if newVal, err := c.DeepCopy(val); err != nil {
+ return err
+ } else {
+ (*out)[key] = newVal.([]byte)
+ }
+ }
+ } else {
+ out.Data = nil
+ }
+ if in.StringData != nil {
+ in, out := in.StringData, &out.StringData
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.StringData = nil
+ }
+ out.Type = in.Type
+ return nil
+}
+
+func DeepCopy_v1_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector, c *conversion.Cloner) error {
+ out.LocalObjectReference = in.LocalObjectReference
+ out.Key = in.Key
+ return nil
+}
+
+func DeepCopy_v1_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Secret, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Secret(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error {
+ out.SecretName = in.SecretName
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]KeyToPath, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_SecurityContext(in SecurityContext, out *SecurityContext, c *conversion.Cloner) error {
+ if in.Capabilities != nil {
+ in, out := in.Capabilities, &out.Capabilities
+ *out = new(Capabilities)
+ if err := DeepCopy_v1_Capabilities(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Capabilities = nil
+ }
+ if in.Privileged != nil {
+ in, out := in.Privileged, &out.Privileged
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.Privileged = nil
+ }
+ if in.SELinuxOptions != nil {
+ in, out := in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(SELinuxOptions)
+ **out = *in
+ } else {
+ out.SELinuxOptions = nil
+ }
+ if in.RunAsUser != nil {
+ in, out := in.RunAsUser, &out.RunAsUser
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.RunAsUser = nil
+ }
+ if in.RunAsNonRoot != nil {
+ in, out := in.RunAsNonRoot, &out.RunAsNonRoot
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.RunAsNonRoot = nil
+ }
+ if in.ReadOnlyRootFilesystem != nil {
+ in, out := in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.ReadOnlyRootFilesystem = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Reference = in.Reference
+ return nil
+}
+
+func DeepCopy_v1_Service(in Service, out *Service, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_ServiceSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_ServiceStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Secrets != nil {
+ in, out := in.Secrets, &out.Secrets
+ *out = make([]ObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Secrets = nil
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]LocalObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.ImagePullSecrets = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ServiceAccount, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_ServiceAccount(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Service, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Service(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.Protocol = in.Protocol
+ out.Port = in.Port
+ out.TargetPort = in.TargetPort
+ out.NodePort = in.NodePort
+ return nil
+}
+
+func DeepCopy_v1_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Path = in.Path
+ return nil
+}
+
+func DeepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error {
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]ServicePort, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.ClusterIP = in.ClusterIP
+ out.Type = in.Type
+ if in.ExternalIPs != nil {
+ in, out := in.ExternalIPs, &out.ExternalIPs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.ExternalIPs = nil
+ }
+ if in.DeprecatedPublicIPs != nil {
+ in, out := in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.DeprecatedPublicIPs = nil
+ }
+ out.SessionAffinity = in.SessionAffinity
+ out.LoadBalancerIP = in.LoadBalancerIP
+ if in.LoadBalancerSourceRanges != nil {
+ in, out := in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.LoadBalancerSourceRanges = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_ServiceStatus(in ServiceStatus, out *ServiceStatus, c *conversion.Cloner) error {
+ if err := DeepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error {
+ out.Port = in.Port
+ return nil
+}
+
+func DeepCopy_v1_Taint(in Taint, out *Taint, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Value = in.Value
+ out.Effect = in.Effect
+ return nil
+}
+
+func DeepCopy_v1_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ out.Value = in.Value
+ out.Effect = in.Effect
+ return nil
+}
+
+func DeepCopy_v1_Volume(in Volume, out *Volume, c *conversion.Cloner) error {
+ out.Name = in.Name
+ if err := DeepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.ReadOnly = in.ReadOnly
+ out.MountPath = in.MountPath
+ out.SubPath = in.SubPath
+ return nil
+}
+
+func DeepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.Cloner) error {
+ if in.HostPath != nil {
+ in, out := in.HostPath, &out.HostPath
+ *out = new(HostPathVolumeSource)
+ **out = *in
+ } else {
+ out.HostPath = nil
+ }
+ if in.EmptyDir != nil {
+ in, out := in.EmptyDir, &out.EmptyDir
+ *out = new(EmptyDirVolumeSource)
+ **out = *in
+ } else {
+ out.EmptyDir = nil
+ }
+ if in.GCEPersistentDisk != nil {
+ in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk
+ *out = new(GCEPersistentDiskVolumeSource)
+ **out = *in
+ } else {
+ out.GCEPersistentDisk = nil
+ }
+ if in.AWSElasticBlockStore != nil {
+ in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+ *out = new(AWSElasticBlockStoreVolumeSource)
+ **out = *in
+ } else {
+ out.AWSElasticBlockStore = nil
+ }
+ if in.GitRepo != nil {
+ in, out := in.GitRepo, &out.GitRepo
+ *out = new(GitRepoVolumeSource)
+ **out = *in
+ } else {
+ out.GitRepo = nil
+ }
+ if in.Secret != nil {
+ in, out := in.Secret, &out.Secret
+ *out = new(SecretVolumeSource)
+ if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Secret = nil
+ }
+ if in.NFS != nil {
+ in, out := in.NFS, &out.NFS
+ *out = new(NFSVolumeSource)
+ **out = *in
+ } else {
+ out.NFS = nil
+ }
+ if in.ISCSI != nil {
+ in, out := in.ISCSI, &out.ISCSI
+ *out = new(ISCSIVolumeSource)
+ **out = *in
+ } else {
+ out.ISCSI = nil
+ }
+ if in.Glusterfs != nil {
+ in, out := in.Glusterfs, &out.Glusterfs
+ *out = new(GlusterfsVolumeSource)
+ **out = *in
+ } else {
+ out.Glusterfs = nil
+ }
+ if in.PersistentVolumeClaim != nil {
+ in, out := in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+ *out = new(PersistentVolumeClaimVolumeSource)
+ **out = *in
+ } else {
+ out.PersistentVolumeClaim = nil
+ }
+ if in.RBD != nil {
+ in, out := in.RBD, &out.RBD
+ *out = new(RBDVolumeSource)
+ if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RBD = nil
+ }
+ if in.FlexVolume != nil {
+ in, out := in.FlexVolume, &out.FlexVolume
+ *out = new(FlexVolumeSource)
+ if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FlexVolume = nil
+ }
+ if in.Cinder != nil {
+ in, out := in.Cinder, &out.Cinder
+ *out = new(CinderVolumeSource)
+ **out = *in
+ } else {
+ out.Cinder = nil
+ }
+ if in.CephFS != nil {
+ in, out := in.CephFS, &out.CephFS
+ *out = new(CephFSVolumeSource)
+ if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.CephFS = nil
+ }
+ if in.Flocker != nil {
+ in, out := in.Flocker, &out.Flocker
+ *out = new(FlockerVolumeSource)
+ **out = *in
+ } else {
+ out.Flocker = nil
+ }
+ if in.DownwardAPI != nil {
+ in, out := in.DownwardAPI, &out.DownwardAPI
+ *out = new(DownwardAPIVolumeSource)
+ if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.DownwardAPI = nil
+ }
+ if in.FC != nil {
+ in, out := in.FC, &out.FC
+ *out = new(FCVolumeSource)
+ if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.FC = nil
+ }
+ if in.AzureFile != nil {
+ in, out := in.AzureFile, &out.AzureFile
+ *out = new(AzureFileVolumeSource)
+ **out = *in
+ } else {
+ out.AzureFile = nil
+ }
+ if in.ConfigMap != nil {
+ in, out := in.ConfigMap, &out.ConfigMap
+ *out = new(ConfigMapVolumeSource)
+ if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.ConfigMap = nil
+ }
+ if in.VsphereVolume != nil {
+ in, out := in.VsphereVolume, &out.VsphereVolume
+ *out = new(VsphereVirtualDiskVolumeSource)
+ **out = *in
+ } else {
+ out.VsphereVolume = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error {
+ out.VolumePath = in.VolumePath
+ out.FSType = in.FSType
+ return nil
+}
+
+func DeepCopy_v1_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error {
+ out.Weight = in.Weight
+ if err := DeepCopy_v1_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil {
+ return err
+ }
+ return nil
+}
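
Note: the generated helpers above all follow the same expanded pattern: pointer fields are re-allocated and copied by value, slices and maps are rebuilt element by element, and only values without a dedicated helper (for example the []byte values in Secret.Data) fall back to the generic conversion.Cloner. As a quick illustration, here is a minimal, stand-alone sketch (not part of the commit; the import path is assumed to resolve to the vendored k8s.io/kubernetes/pkg/api/v1, and the main package exists only for illustration) that calls DeepCopy_v1_PodSecurityContext directly and verifies that later mutations of the source do not leak into the copy:

package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1" // assumed vendored import path
)

func main() {
	uid := int64(1000)
	in := v1.PodSecurityContext{
		RunAsUser:          &uid,
		SupplementalGroups: []int64{10, 20},
	}

	var out v1.PodSecurityContext
	// This particular helper never uses its *conversion.Cloner argument
	// (see its body above), so nil is acceptable here.
	if err := v1.DeepCopy_v1_PodSecurityContext(in, &out, nil); err != nil {
		panic(err)
	}

	// Mutate the source; the copy keeps its own pointer and slice storage.
	*in.RunAsUser = 0
	in.SupplementalGroups[0] = 99

	fmt.Println(*out.RunAsUser, out.SupplementalGroups) // 1000 [10 20]
}

Helpers that do call c.DeepCopy, such as DeepCopy_v1_Secret above, need a real Cloner rather than nil.
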
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go
new file mode 100644
index 0000000..5d6323f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go
@@ -0,0 +1,301 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/intstr"
+ "k8s.io/kubernetes/pkg/util/parsers"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_PodExecOptions,
+ SetDefaults_PodAttachOptions,
+ SetDefaults_ReplicationController,
+ SetDefaults_Volume,
+ SetDefaults_ContainerPort,
+ SetDefaults_Container,
+ SetDefaults_ServiceSpec,
+ SetDefaults_Pod,
+ SetDefaults_PodSpec,
+ SetDefaults_Probe,
+ SetDefaults_Secret,
+ SetDefaults_PersistentVolume,
+ SetDefaults_PersistentVolumeClaim,
+ SetDefaults_ISCSIVolumeSource,
+ SetDefaults_Endpoints,
+ SetDefaults_HTTPGetAction,
+ SetDefaults_NamespaceStatus,
+ SetDefaults_Node,
+ SetDefaults_NodeStatus,
+ SetDefaults_ObjectFieldSelector,
+ SetDefaults_LimitRangeItem,
+ SetDefaults_ConfigMap,
+ SetDefaults_RBDVolumeSource,
+ )
+}
+
+func SetDefaults_PodExecOptions(obj *PodExecOptions) {
+ obj.Stdout = true
+ obj.Stderr = true
+}
+func SetDefaults_PodAttachOptions(obj *PodAttachOptions) {
+ obj.Stdout = true
+ obj.Stderr = true
+}
+func SetDefaults_ReplicationController(obj *ReplicationController) {
+ var labels map[string]string
+ if obj.Spec.Template != nil {
+ labels = obj.Spec.Template.Labels
+ }
+ // TODO: support templates defined elsewhere when we support them in the API
+ if labels != nil {
+ if len(obj.Spec.Selector) == 0 {
+ obj.Spec.Selector = labels
+ }
+ if len(obj.Labels) == 0 {
+ obj.Labels = labels
+ }
+ }
+ if obj.Spec.Replicas == nil {
+ obj.Spec.Replicas = new(int32)
+ *obj.Spec.Replicas = 1
+ }
+}
+func SetDefaults_Volume(obj *Volume) {
+ if util.AllPtrFieldsNil(&obj.VolumeSource) {
+ obj.VolumeSource = VolumeSource{
+ EmptyDir: &EmptyDirVolumeSource{},
+ }
+ }
+}
+func SetDefaults_ContainerPort(obj *ContainerPort) {
+ if obj.Protocol == "" {
+ obj.Protocol = ProtocolTCP
+ }
+}
+func SetDefaults_Container(obj *Container) {
+ if obj.ImagePullPolicy == "" {
+ // Ignore error and assume it has been validated elsewhere
+ _, tag, _, _ := parsers.ParseImageName(obj.Image)
+
+ // Check image tag
+
+ if tag == "latest" {
+ obj.ImagePullPolicy = PullAlways
+ } else {
+ obj.ImagePullPolicy = PullIfNotPresent
+ }
+ }
+ if obj.TerminationMessagePath == "" {
+ obj.TerminationMessagePath = TerminationMessagePathDefault
+ }
+}
+func SetDefaults_ServiceSpec(obj *ServiceSpec) {
+ if obj.SessionAffinity == "" {
+ obj.SessionAffinity = ServiceAffinityNone
+ }
+ if obj.Type == "" {
+ obj.Type = ServiceTypeClusterIP
+ }
+ for i := range obj.Ports {
+ sp := &obj.Ports[i]
+ if sp.Protocol == "" {
+ sp.Protocol = ProtocolTCP
+ }
+ if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") {
+ sp.TargetPort = intstr.FromInt(int(sp.Port))
+ }
+ }
+}
+func SetDefaults_Pod(obj *Pod) {
+ // If limits are specified, but requests are not, default requests to limits
+ // This is done here rather than a more specific defaulting pass on ResourceRequirements
+ // because we only want this defaulting semantic to take place on a Pod and not a PodTemplate
+ for i := range obj.Spec.Containers {
+ // set requests to limits if requests are not specified, but limits are
+ if obj.Spec.Containers[i].Resources.Limits != nil {
+ if obj.Spec.Containers[i].Resources.Requests == nil {
+ obj.Spec.Containers[i].Resources.Requests = make(ResourceList)
+ }
+ for key, value := range obj.Spec.Containers[i].Resources.Limits {
+ if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists {
+ obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy())
+ }
+ }
+ }
+ }
+}
+func SetDefaults_PodSpec(obj *PodSpec) {
+ if obj.DNSPolicy == "" {
+ obj.DNSPolicy = DNSClusterFirst
+ }
+ if obj.RestartPolicy == "" {
+ obj.RestartPolicy = RestartPolicyAlways
+ }
+ if obj.HostNetwork {
+ defaultHostNetworkPorts(&obj.Containers)
+ }
+ if obj.SecurityContext == nil {
+ obj.SecurityContext = &PodSecurityContext{}
+ }
+ if obj.TerminationGracePeriodSeconds == nil {
+ period := int64(DefaultTerminationGracePeriodSeconds)
+ obj.TerminationGracePeriodSeconds = &period
+ }
+}
+func SetDefaults_Probe(obj *Probe) {
+ if obj.TimeoutSeconds == 0 {
+ obj.TimeoutSeconds = 1
+ }
+ if obj.PeriodSeconds == 0 {
+ obj.PeriodSeconds = 10
+ }
+ if obj.SuccessThreshold == 0 {
+ obj.SuccessThreshold = 1
+ }
+ if obj.FailureThreshold == 0 {
+ obj.FailureThreshold = 3
+ }
+}
+func SetDefaults_Secret(obj *Secret) {
+ if obj.Type == "" {
+ obj.Type = SecretTypeOpaque
+ }
+}
+func SetDefaults_PersistentVolume(obj *PersistentVolume) {
+ if obj.Status.Phase == "" {
+ obj.Status.Phase = VolumePending
+ }
+ if obj.Spec.PersistentVolumeReclaimPolicy == "" {
+ obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain
+ }
+}
+func SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) {
+ if obj.Status.Phase == "" {
+ obj.Status.Phase = ClaimPending
+ }
+}
+func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) {
+ if obj.ISCSIInterface == "" {
+ obj.ISCSIInterface = "default"
+ }
+}
+func SetDefaults_Endpoints(obj *Endpoints) {
+ for i := range obj.Subsets {
+ ss := &obj.Subsets[i]
+ for i := range ss.Ports {
+ ep := &ss.Ports[i]
+ if ep.Protocol == "" {
+ ep.Protocol = ProtocolTCP
+ }
+ }
+ }
+}
+func SetDefaults_HTTPGetAction(obj *HTTPGetAction) {
+ if obj.Path == "" {
+ obj.Path = "/"
+ }
+ if obj.Scheme == "" {
+ obj.Scheme = URISchemeHTTP
+ }
+}
+func SetDefaults_NamespaceStatus(obj *NamespaceStatus) {
+ if obj.Phase == "" {
+ obj.Phase = NamespaceActive
+ }
+}
+func SetDefaults_Node(obj *Node) {
+ if obj.Spec.ExternalID == "" {
+ obj.Spec.ExternalID = obj.Name
+ }
+}
+func SetDefaults_NodeStatus(obj *NodeStatus) {
+ if obj.Allocatable == nil && obj.Capacity != nil {
+ obj.Allocatable = make(ResourceList, len(obj.Capacity))
+ for key, value := range obj.Capacity {
+ obj.Allocatable[key] = *(value.Copy())
+ }
+ obj.Allocatable = obj.Capacity
+ }
+}
+func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) {
+ if obj.APIVersion == "" {
+ obj.APIVersion = "v1"
+ }
+}
+func SetDefaults_LimitRangeItem(obj *LimitRangeItem) {
+ // for container limits, we apply default values
+ if obj.Type == LimitTypeContainer {
+
+ if obj.Default == nil {
+ obj.Default = make(ResourceList)
+ }
+ if obj.DefaultRequest == nil {
+ obj.DefaultRequest = make(ResourceList)
+ }
+
+ // If a default limit is unspecified, but the max is specified, default the limit to the max
+ for key, value := range obj.Max {
+ if _, exists := obj.Default[key]; !exists {
+ obj.Default[key] = *(value.Copy())
+ }
+ }
+ // If a default limit is specified, but the default request is not, default request to limit
+ for key, value := range obj.Default {
+ if _, exists := obj.DefaultRequest[key]; !exists {
+ obj.DefaultRequest[key] = *(value.Copy())
+ }
+ }
+ // If a default request is not specified, but the min is provided, default request to the min
+ for key, value := range obj.Min {
+ if _, exists := obj.DefaultRequest[key]; !exists {
+ obj.DefaultRequest[key] = *(value.Copy())
+ }
+ }
+ }
+}
+func SetDefaults_ConfigMap(obj *ConfigMap) {
+ if obj.Data == nil {
+ obj.Data = make(map[string]string)
+ }
+}
+
+// With host networking default all container ports to host ports.
+func defaultHostNetworkPorts(containers *[]Container) {
+ for i := range *containers {
+ for j := range (*containers)[i].Ports {
+ if (*containers)[i].Ports[j].HostPort == 0 {
+ (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort
+ }
+ }
+ }
+}
+
+func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) {
+ if obj.RBDPool == "" {
+ obj.RBDPool = "rbd"
+ }
+ if obj.RadosUser == "" {
+ obj.RadosUser = "admin"
+ }
+ if obj.Keyring == "" {
+ obj.Keyring = "/etc/ceph/keyring"
+ }
+}
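
Note: the SetDefaults_* functions above are registered with the runtime scheme in addDefaultingFuncs and are normally applied by the API machinery when objects are decoded; each one only fills in fields that are still at their zero value. A minimal, stand-alone sketch of that behaviour (not part of the commit; the import path is assumed to resolve to the vendored k8s.io/kubernetes/pkg/api/v1, and the main package exists only for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1" // assumed vendored import path
)

func main() {
	// Only the timeout is set explicitly; the other probe fields are zero.
	p := v1.Probe{TimeoutSeconds: 5}
	v1.SetDefaults_Probe(&p)
	fmt.Println(p.TimeoutSeconds, p.PeriodSeconds, p.SuccessThreshold, p.FailureThreshold)
	// -> 5 10 1 3

	rbd := v1.RBDVolumeSource{}
	v1.SetDefaults_RBDVolumeSource(&rbd)
	fmt.Println(rbd.RBDPool, rbd.RadosUser, rbd.Keyring)
	// -> rbd admin /etc/ceph/keyring
}

Explicitly set fields are left alone, which is why the probe keeps its 5-second timeout while the remaining thresholds pick up the hard-coded defaults.
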
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go
new file mode 100644
index 0000000..8849ee1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/api
+
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go
new file mode 100644
index 0000000..16ac125
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go
@@ -0,0 +1,34797 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/api/v1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/api/v1/generated.proto
+
+ It has these top-level messages:
+ AWSElasticBlockStoreVolumeSource
+ Affinity
+ AttachedVolume
+ AzureFileVolumeSource
+ Binding
+ Capabilities
+ CephFSVolumeSource
+ CinderVolumeSource
+ ComponentCondition
+ ComponentStatus
+ ComponentStatusList
+ ConfigMap
+ ConfigMapKeySelector
+ ConfigMapList
+ ConfigMapVolumeSource
+ Container
+ ContainerImage
+ ContainerPort
+ ContainerState
+ ContainerStateRunning
+ ContainerStateTerminated
+ ContainerStateWaiting
+ ContainerStatus
+ DaemonEndpoint
+ DeleteOptions
+ DownwardAPIVolumeFile
+ DownwardAPIVolumeSource
+ EmptyDirVolumeSource
+ EndpointAddress
+ EndpointPort
+ EndpointSubset
+ Endpoints
+ EndpointsList
+ EnvVar
+ EnvVarSource
+ Event
+ EventList
+ EventSource
+ ExecAction
+ ExportOptions
+ FCVolumeSource
+ FlexVolumeSource
+ FlockerVolumeSource
+ GCEPersistentDiskVolumeSource
+ GitRepoVolumeSource
+ GlusterfsVolumeSource
+ HTTPGetAction
+ HTTPHeader
+ Handler
+ HostPathVolumeSource
+ ISCSIVolumeSource
+ KeyToPath
+ Lifecycle
+ LimitRange
+ LimitRangeItem
+ LimitRangeList
+ LimitRangeSpec
+ List
+ ListOptions
+ LoadBalancerIngress
+ LoadBalancerStatus
+ LocalObjectReference
+ NFSVolumeSource
+ Namespace
+ NamespaceList
+ NamespaceSpec
+ NamespaceStatus
+ Node
+ NodeAddress
+ NodeAffinity
+ NodeCondition
+ NodeDaemonEndpoints
+ NodeList
+ NodeProxyOptions
+ NodeSelector
+ NodeSelectorRequirement
+ NodeSelectorTerm
+ NodeSpec
+ NodeStatus
+ NodeSystemInfo
+ ObjectFieldSelector
+ ObjectMeta
+ ObjectReference
+ OwnerReference
+ PersistentVolume
+ PersistentVolumeClaim
+ PersistentVolumeClaimList
+ PersistentVolumeClaimSpec
+ PersistentVolumeClaimStatus
+ PersistentVolumeClaimVolumeSource
+ PersistentVolumeList
+ PersistentVolumeSource
+ PersistentVolumeSpec
+ PersistentVolumeStatus
+ Pod
+ PodAffinity
+ PodAffinityTerm
+ PodAntiAffinity
+ PodAttachOptions
+ PodCondition
+ PodExecOptions
+ PodList
+ PodLogOptions
+ PodProxyOptions
+ PodSecurityContext
+ PodSpec
+ PodStatus
+ PodStatusResult
+ PodTemplate
+ PodTemplateList
+ PodTemplateSpec
+ Preconditions
+ PreferredSchedulingTerm
+ Probe
+ RBDVolumeSource
+ RangeAllocation
+ ReplicationController
+ ReplicationControllerList
+ ReplicationControllerSpec
+ ReplicationControllerStatus
+ ResourceFieldSelector
+ ResourceQuota
+ ResourceQuotaList
+ ResourceQuotaSpec
+ ResourceQuotaStatus
+ ResourceRequirements
+ SELinuxOptions
+ Secret
+ SecretKeySelector
+ SecretList
+ SecretVolumeSource
+ SecurityContext
+ SerializedReference
+ Service
+ ServiceAccount
+ ServiceAccountList
+ ServiceList
+ ServicePort
+ ServiceProxyOptions
+ ServiceSpec
+ ServiceStatus
+ TCPSocketAction
+ Taint
+ Toleration
+ Volume
+ VolumeMount
+ VolumeSource
+ VsphereVirtualDiskVolumeSource
+ WeightedPodAffinityTerm
+*/
+package v1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_resource "k8s.io/kubernetes/pkg/api/resource"
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+import k8s_io_kubernetes_pkg_runtime "k8s.io/kubernetes/pkg/runtime"
+
+import k8s_io_kubernetes_pkg_types "k8s.io/kubernetes/pkg/types"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} }
+func (m *AWSElasticBlockStoreVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {}
+
+func (m *Affinity) Reset() { *m = Affinity{} }
+func (m *Affinity) String() string { return proto.CompactTextString(m) }
+func (*Affinity) ProtoMessage() {}
+
+func (m *AttachedVolume) Reset() { *m = AttachedVolume{} }
+func (m *AttachedVolume) String() string { return proto.CompactTextString(m) }
+func (*AttachedVolume) ProtoMessage() {}
+
+func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} }
+func (m *AzureFileVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*AzureFileVolumeSource) ProtoMessage() {}
+
+func (m *Binding) Reset() { *m = Binding{} }
+func (m *Binding) String() string { return proto.CompactTextString(m) }
+func (*Binding) ProtoMessage() {}
+
+func (m *Capabilities) Reset() { *m = Capabilities{} }
+func (m *Capabilities) String() string { return proto.CompactTextString(m) }
+func (*Capabilities) ProtoMessage() {}
+
+func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} }
+func (m *CephFSVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*CephFSVolumeSource) ProtoMessage() {}
+
+func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} }
+func (m *CinderVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*CinderVolumeSource) ProtoMessage() {}
+
+func (m *ComponentCondition) Reset() { *m = ComponentCondition{} }
+func (m *ComponentCondition) String() string { return proto.CompactTextString(m) }
+func (*ComponentCondition) ProtoMessage() {}
+
+func (m *ComponentStatus) Reset() { *m = ComponentStatus{} }
+func (m *ComponentStatus) String() string { return proto.CompactTextString(m) }
+func (*ComponentStatus) ProtoMessage() {}
+
+func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} }
+func (m *ComponentStatusList) String() string { return proto.CompactTextString(m) }
+func (*ComponentStatusList) ProtoMessage() {}
+
+func (m *ConfigMap) Reset() { *m = ConfigMap{} }
+func (m *ConfigMap) String() string { return proto.CompactTextString(m) }
+func (*ConfigMap) ProtoMessage() {}
+
+func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} }
+func (m *ConfigMapKeySelector) String() string { return proto.CompactTextString(m) }
+func (*ConfigMapKeySelector) ProtoMessage() {}
+
+func (m *ConfigMapList) Reset() { *m = ConfigMapList{} }
+func (m *ConfigMapList) String() string { return proto.CompactTextString(m) }
+func (*ConfigMapList) ProtoMessage() {}
+
+func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} }
+func (m *ConfigMapVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*ConfigMapVolumeSource) ProtoMessage() {}
+
+func (m *Container) Reset() { *m = Container{} }
+func (m *Container) String() string { return proto.CompactTextString(m) }
+func (*Container) ProtoMessage() {}
+
+func (m *ContainerImage) Reset() { *m = ContainerImage{} }
+func (m *ContainerImage) String() string { return proto.CompactTextString(m) }
+func (*ContainerImage) ProtoMessage() {}
+
+func (m *ContainerPort) Reset() { *m = ContainerPort{} }
+func (m *ContainerPort) String() string { return proto.CompactTextString(m) }
+func (*ContainerPort) ProtoMessage() {}
+
+func (m *ContainerState) Reset() { *m = ContainerState{} }
+func (m *ContainerState) String() string { return proto.CompactTextString(m) }
+func (*ContainerState) ProtoMessage() {}
+
+func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} }
+func (m *ContainerStateRunning) String() string { return proto.CompactTextString(m) }
+func (*ContainerStateRunning) ProtoMessage() {}
+
+func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} }
+func (m *ContainerStateTerminated) String() string { return proto.CompactTextString(m) }
+func (*ContainerStateTerminated) ProtoMessage() {}
+
+func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} }
+func (m *ContainerStateWaiting) String() string { return proto.CompactTextString(m) }
+func (*ContainerStateWaiting) ProtoMessage() {}
+
+func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
+func (m *ContainerStatus) String() string { return proto.CompactTextString(m) }
+func (*ContainerStatus) ProtoMessage() {}
+
+func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} }
+func (m *DaemonEndpoint) String() string { return proto.CompactTextString(m) }
+func (*DaemonEndpoint) ProtoMessage() {}
+
+func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
+func (m *DeleteOptions) String() string { return proto.CompactTextString(m) }
+func (*DeleteOptions) ProtoMessage() {}
+
+func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} }
+func (m *DownwardAPIVolumeFile) String() string { return proto.CompactTextString(m) }
+func (*DownwardAPIVolumeFile) ProtoMessage() {}
+
+func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} }
+func (m *DownwardAPIVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*DownwardAPIVolumeSource) ProtoMessage() {}
+
+func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} }
+func (m *EmptyDirVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*EmptyDirVolumeSource) ProtoMessage() {}
+
+func (m *EndpointAddress) Reset() { *m = EndpointAddress{} }
+func (m *EndpointAddress) String() string { return proto.CompactTextString(m) }
+func (*EndpointAddress) ProtoMessage() {}
+
+func (m *EndpointPort) Reset() { *m = EndpointPort{} }
+func (m *EndpointPort) String() string { return proto.CompactTextString(m) }
+func (*EndpointPort) ProtoMessage() {}
+
+func (m *EndpointSubset) Reset() { *m = EndpointSubset{} }
+func (m *EndpointSubset) String() string { return proto.CompactTextString(m) }
+func (*EndpointSubset) ProtoMessage() {}
+
+func (m *Endpoints) Reset() { *m = Endpoints{} }
+func (m *Endpoints) String() string { return proto.CompactTextString(m) }
+func (*Endpoints) ProtoMessage() {}
+
+func (m *EndpointsList) Reset() { *m = EndpointsList{} }
+func (m *EndpointsList) String() string { return proto.CompactTextString(m) }
+func (*EndpointsList) ProtoMessage() {}
+
+func (m *EnvVar) Reset() { *m = EnvVar{} }
+func (m *EnvVar) String() string { return proto.CompactTextString(m) }
+func (*EnvVar) ProtoMessage() {}
+
+func (m *EnvVarSource) Reset() { *m = EnvVarSource{} }
+func (m *EnvVarSource) String() string { return proto.CompactTextString(m) }
+func (*EnvVarSource) ProtoMessage() {}
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+
+func (m *EventList) Reset() { *m = EventList{} }
+func (m *EventList) String() string { return proto.CompactTextString(m) }
+func (*EventList) ProtoMessage() {}
+
+func (m *EventSource) Reset() { *m = EventSource{} }
+func (m *EventSource) String() string { return proto.CompactTextString(m) }
+func (*EventSource) ProtoMessage() {}
+
+func (m *ExecAction) Reset() { *m = ExecAction{} }
+func (m *ExecAction) String() string { return proto.CompactTextString(m) }
+func (*ExecAction) ProtoMessage() {}
+
+func (m *ExportOptions) Reset() { *m = ExportOptions{} }
+func (m *ExportOptions) String() string { return proto.CompactTextString(m) }
+func (*ExportOptions) ProtoMessage() {}
+
+func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} }
+func (m *FCVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*FCVolumeSource) ProtoMessage() {}
+
+func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
+func (m *FlexVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*FlexVolumeSource) ProtoMessage() {}
+
+func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
+func (m *FlockerVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*FlockerVolumeSource) ProtoMessage() {}
+
+func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
+func (m *GCEPersistentDiskVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
+
+func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
+func (m *GitRepoVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*GitRepoVolumeSource) ProtoMessage() {}
+
+func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
+func (m *GlusterfsVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*GlusterfsVolumeSource) ProtoMessage() {}
+
+func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
+func (m *HTTPGetAction) String() string { return proto.CompactTextString(m) }
+func (*HTTPGetAction) ProtoMessage() {}
+
+func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
+func (m *HTTPHeader) String() string { return proto.CompactTextString(m) }
+func (*HTTPHeader) ProtoMessage() {}
+
+func (m *Handler) Reset() { *m = Handler{} }
+func (m *Handler) String() string { return proto.CompactTextString(m) }
+func (*Handler) ProtoMessage() {}
+
+func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
+func (m *HostPathVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*HostPathVolumeSource) ProtoMessage() {}
+
+func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
+func (m *ISCSIVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*ISCSIVolumeSource) ProtoMessage() {}
+
+func (m *KeyToPath) Reset() { *m = KeyToPath{} }
+func (m *KeyToPath) String() string { return proto.CompactTextString(m) }
+func (*KeyToPath) ProtoMessage() {}
+
+func (m *Lifecycle) Reset() { *m = Lifecycle{} }
+func (m *Lifecycle) String() string { return proto.CompactTextString(m) }
+func (*Lifecycle) ProtoMessage() {}
+
+func (m *LimitRange) Reset() { *m = LimitRange{} }
+func (m *LimitRange) String() string { return proto.CompactTextString(m) }
+func (*LimitRange) ProtoMessage() {}
+
+func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
+func (m *LimitRangeItem) String() string { return proto.CompactTextString(m) }
+func (*LimitRangeItem) ProtoMessage() {}
+
+func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
+func (m *LimitRangeList) String() string { return proto.CompactTextString(m) }
+func (*LimitRangeList) ProtoMessage() {}
+
+func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
+func (m *LimitRangeSpec) String() string { return proto.CompactTextString(m) }
+func (*LimitRangeSpec) ProtoMessage() {}
+
+func (m *List) Reset() { *m = List{} }
+func (m *List) String() string { return proto.CompactTextString(m) }
+func (*List) ProtoMessage() {}
+
+func (m *ListOptions) Reset() { *m = ListOptions{} }
+func (m *ListOptions) String() string { return proto.CompactTextString(m) }
+func (*ListOptions) ProtoMessage() {}
+
+func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
+func (m *LoadBalancerIngress) String() string { return proto.CompactTextString(m) }
+func (*LoadBalancerIngress) ProtoMessage() {}
+
+func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
+func (m *LoadBalancerStatus) String() string { return proto.CompactTextString(m) }
+func (*LoadBalancerStatus) ProtoMessage() {}
+
+func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
+func (m *LocalObjectReference) String() string { return proto.CompactTextString(m) }
+func (*LocalObjectReference) ProtoMessage() {}
+
+func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
+func (m *NFSVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*NFSVolumeSource) ProtoMessage() {}
+
+func (m *Namespace) Reset() { *m = Namespace{} }
+func (m *Namespace) String() string { return proto.CompactTextString(m) }
+func (*Namespace) ProtoMessage() {}
+
+func (m *NamespaceList) Reset() { *m = NamespaceList{} }
+func (m *NamespaceList) String() string { return proto.CompactTextString(m) }
+func (*NamespaceList) ProtoMessage() {}
+
+func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
+func (m *NamespaceSpec) String() string { return proto.CompactTextString(m) }
+func (*NamespaceSpec) ProtoMessage() {}
+
+func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
+func (m *NamespaceStatus) String() string { return proto.CompactTextString(m) }
+func (*NamespaceStatus) ProtoMessage() {}
+
+func (m *Node) Reset() { *m = Node{} }
+func (m *Node) String() string { return proto.CompactTextString(m) }
+func (*Node) ProtoMessage() {}
+
+func (m *NodeAddress) Reset() { *m = NodeAddress{} }
+func (m *NodeAddress) String() string { return proto.CompactTextString(m) }
+func (*NodeAddress) ProtoMessage() {}
+
+func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
+func (m *NodeAffinity) String() string { return proto.CompactTextString(m) }
+func (*NodeAffinity) ProtoMessage() {}
+
+func (m *NodeCondition) Reset() { *m = NodeCondition{} }
+func (m *NodeCondition) String() string { return proto.CompactTextString(m) }
+func (*NodeCondition) ProtoMessage() {}
+
+func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
+func (m *NodeDaemonEndpoints) String() string { return proto.CompactTextString(m) }
+func (*NodeDaemonEndpoints) ProtoMessage() {}
+
+func (m *NodeList) Reset() { *m = NodeList{} }
+func (m *NodeList) String() string { return proto.CompactTextString(m) }
+func (*NodeList) ProtoMessage() {}
+
+func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
+func (m *NodeProxyOptions) String() string { return proto.CompactTextString(m) }
+func (*NodeProxyOptions) ProtoMessage() {}
+
+func (m *NodeSelector) Reset() { *m = NodeSelector{} }
+func (m *NodeSelector) String() string { return proto.CompactTextString(m) }
+func (*NodeSelector) ProtoMessage() {}
+
+func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
+func (m *NodeSelectorRequirement) String() string { return proto.CompactTextString(m) }
+func (*NodeSelectorRequirement) ProtoMessage() {}
+
+func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
+func (m *NodeSelectorTerm) String() string { return proto.CompactTextString(m) }
+func (*NodeSelectorTerm) ProtoMessage() {}
+
+func (m *NodeSpec) Reset() { *m = NodeSpec{} }
+func (m *NodeSpec) String() string { return proto.CompactTextString(m) }
+func (*NodeSpec) ProtoMessage() {}
+
+func (m *NodeStatus) Reset() { *m = NodeStatus{} }
+func (m *NodeStatus) String() string { return proto.CompactTextString(m) }
+func (*NodeStatus) ProtoMessage() {}
+
+func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
+func (m *NodeSystemInfo) String() string { return proto.CompactTextString(m) }
+func (*NodeSystemInfo) ProtoMessage() {}
+
+func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
+func (m *ObjectFieldSelector) String() string { return proto.CompactTextString(m) }
+func (*ObjectFieldSelector) ProtoMessage() {}
+
+func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
+func (m *ObjectMeta) String() string { return proto.CompactTextString(m) }
+func (*ObjectMeta) ProtoMessage() {}
+
+func (m *ObjectReference) Reset() { *m = ObjectReference{} }
+func (m *ObjectReference) String() string { return proto.CompactTextString(m) }
+func (*ObjectReference) ProtoMessage() {}
+
+func (m *OwnerReference) Reset() { *m = OwnerReference{} }
+func (m *OwnerReference) String() string { return proto.CompactTextString(m) }
+func (*OwnerReference) ProtoMessage() {}
+
+func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
+func (m *PersistentVolume) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolume) ProtoMessage() {}
+
+func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
+func (m *PersistentVolumeClaim) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeClaim) ProtoMessage() {}
+
+func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
+func (m *PersistentVolumeClaimList) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeClaimList) ProtoMessage() {}
+
+func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
+func (m *PersistentVolumeClaimSpec) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeClaimSpec) ProtoMessage() {}
+
+func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
+func (m *PersistentVolumeClaimStatus) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeClaimStatus) ProtoMessage() {}
+
+func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
+func (m *PersistentVolumeClaimVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
+
+func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
+func (m *PersistentVolumeList) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeList) ProtoMessage() {}
+
+func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
+func (m *PersistentVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeSource) ProtoMessage() {}
+
+func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
+func (m *PersistentVolumeSpec) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeSpec) ProtoMessage() {}
+
+func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
+func (m *PersistentVolumeStatus) String() string { return proto.CompactTextString(m) }
+func (*PersistentVolumeStatus) ProtoMessage() {}
+
+func (m *Pod) Reset() { *m = Pod{} }
+func (m *Pod) String() string { return proto.CompactTextString(m) }
+func (*Pod) ProtoMessage() {}
+
+func (m *PodAffinity) Reset() { *m = PodAffinity{} }
+func (m *PodAffinity) String() string { return proto.CompactTextString(m) }
+func (*PodAffinity) ProtoMessage() {}
+
+func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
+func (m *PodAffinityTerm) String() string { return proto.CompactTextString(m) }
+func (*PodAffinityTerm) ProtoMessage() {}
+
+func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
+func (m *PodAntiAffinity) String() string { return proto.CompactTextString(m) }
+func (*PodAntiAffinity) ProtoMessage() {}
+
+func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
+func (m *PodAttachOptions) String() string { return proto.CompactTextString(m) }
+func (*PodAttachOptions) ProtoMessage() {}
+
+func (m *PodCondition) Reset() { *m = PodCondition{} }
+func (m *PodCondition) String() string { return proto.CompactTextString(m) }
+func (*PodCondition) ProtoMessage() {}
+
+func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
+func (m *PodExecOptions) String() string { return proto.CompactTextString(m) }
+func (*PodExecOptions) ProtoMessage() {}
+
+func (m *PodList) Reset() { *m = PodList{} }
+func (m *PodList) String() string { return proto.CompactTextString(m) }
+func (*PodList) ProtoMessage() {}
+
+func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
+func (m *PodLogOptions) String() string { return proto.CompactTextString(m) }
+func (*PodLogOptions) ProtoMessage() {}
+
+func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
+func (m *PodProxyOptions) String() string { return proto.CompactTextString(m) }
+func (*PodProxyOptions) ProtoMessage() {}
+
+func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
+func (m *PodSecurityContext) String() string { return proto.CompactTextString(m) }
+func (*PodSecurityContext) ProtoMessage() {}
+
+func (m *PodSpec) Reset() { *m = PodSpec{} }
+func (m *PodSpec) String() string { return proto.CompactTextString(m) }
+func (*PodSpec) ProtoMessage() {}
+
+func (m *PodStatus) Reset() { *m = PodStatus{} }
+func (m *PodStatus) String() string { return proto.CompactTextString(m) }
+func (*PodStatus) ProtoMessage() {}
+
+func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
+func (m *PodStatusResult) String() string { return proto.CompactTextString(m) }
+func (*PodStatusResult) ProtoMessage() {}
+
+func (m *PodTemplate) Reset() { *m = PodTemplate{} }
+func (m *PodTemplate) String() string { return proto.CompactTextString(m) }
+func (*PodTemplate) ProtoMessage() {}
+
+func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
+func (m *PodTemplateList) String() string { return proto.CompactTextString(m) }
+func (*PodTemplateList) ProtoMessage() {}
+
+func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
+func (m *PodTemplateSpec) String() string { return proto.CompactTextString(m) }
+func (*PodTemplateSpec) ProtoMessage() {}
+
+func (m *Preconditions) Reset() { *m = Preconditions{} }
+func (m *Preconditions) String() string { return proto.CompactTextString(m) }
+func (*Preconditions) ProtoMessage() {}
+
+func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
+func (m *PreferredSchedulingTerm) String() string { return proto.CompactTextString(m) }
+func (*PreferredSchedulingTerm) ProtoMessage() {}
+
+func (m *Probe) Reset() { *m = Probe{} }
+func (m *Probe) String() string { return proto.CompactTextString(m) }
+func (*Probe) ProtoMessage() {}
+
+func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
+func (m *RBDVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*RBDVolumeSource) ProtoMessage() {}
+
+func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
+func (m *RangeAllocation) String() string { return proto.CompactTextString(m) }
+func (*RangeAllocation) ProtoMessage() {}
+
+func (m *ReplicationController) Reset() { *m = ReplicationController{} }
+func (m *ReplicationController) String() string { return proto.CompactTextString(m) }
+func (*ReplicationController) ProtoMessage() {}
+
+func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
+func (m *ReplicationControllerList) String() string { return proto.CompactTextString(m) }
+func (*ReplicationControllerList) ProtoMessage() {}
+
+func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
+func (m *ReplicationControllerSpec) String() string { return proto.CompactTextString(m) }
+func (*ReplicationControllerSpec) ProtoMessage() {}
+
+func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
+func (m *ReplicationControllerStatus) String() string { return proto.CompactTextString(m) }
+func (*ReplicationControllerStatus) ProtoMessage() {}
+
+func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
+func (m *ResourceFieldSelector) String() string { return proto.CompactTextString(m) }
+func (*ResourceFieldSelector) ProtoMessage() {}
+
+func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
+func (m *ResourceQuota) String() string { return proto.CompactTextString(m) }
+func (*ResourceQuota) ProtoMessage() {}
+
+func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
+func (m *ResourceQuotaList) String() string { return proto.CompactTextString(m) }
+func (*ResourceQuotaList) ProtoMessage() {}
+
+func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
+func (m *ResourceQuotaSpec) String() string { return proto.CompactTextString(m) }
+func (*ResourceQuotaSpec) ProtoMessage() {}
+
+func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
+func (m *ResourceQuotaStatus) String() string { return proto.CompactTextString(m) }
+func (*ResourceQuotaStatus) ProtoMessage() {}
+
+func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
+func (m *ResourceRequirements) String() string { return proto.CompactTextString(m) }
+func (*ResourceRequirements) ProtoMessage() {}
+
+func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
+func (m *SELinuxOptions) String() string { return proto.CompactTextString(m) }
+func (*SELinuxOptions) ProtoMessage() {}
+
+func (m *Secret) Reset() { *m = Secret{} }
+func (m *Secret) String() string { return proto.CompactTextString(m) }
+func (*Secret) ProtoMessage() {}
+
+func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
+func (m *SecretKeySelector) String() string { return proto.CompactTextString(m) }
+func (*SecretKeySelector) ProtoMessage() {}
+
+func (m *SecretList) Reset() { *m = SecretList{} }
+func (m *SecretList) String() string { return proto.CompactTextString(m) }
+func (*SecretList) ProtoMessage() {}
+
+func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
+func (m *SecretVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*SecretVolumeSource) ProtoMessage() {}
+
+func (m *SecurityContext) Reset() { *m = SecurityContext{} }
+func (m *SecurityContext) String() string { return proto.CompactTextString(m) }
+func (*SecurityContext) ProtoMessage() {}
+
+func (m *SerializedReference) Reset() { *m = SerializedReference{} }
+func (m *SerializedReference) String() string { return proto.CompactTextString(m) }
+func (*SerializedReference) ProtoMessage() {}
+
+func (m *Service) Reset() { *m = Service{} }
+func (m *Service) String() string { return proto.CompactTextString(m) }
+func (*Service) ProtoMessage() {}
+
+func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
+func (m *ServiceAccount) String() string { return proto.CompactTextString(m) }
+func (*ServiceAccount) ProtoMessage() {}
+
+func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
+func (m *ServiceAccountList) String() string { return proto.CompactTextString(m) }
+func (*ServiceAccountList) ProtoMessage() {}
+
+func (m *ServiceList) Reset() { *m = ServiceList{} }
+func (m *ServiceList) String() string { return proto.CompactTextString(m) }
+func (*ServiceList) ProtoMessage() {}
+
+func (m *ServicePort) Reset() { *m = ServicePort{} }
+func (m *ServicePort) String() string { return proto.CompactTextString(m) }
+func (*ServicePort) ProtoMessage() {}
+
+func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
+func (m *ServiceProxyOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceProxyOptions) ProtoMessage() {}
+
+func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
+func (m *ServiceSpec) String() string { return proto.CompactTextString(m) }
+func (*ServiceSpec) ProtoMessage() {}
+
+func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
+func (m *ServiceStatus) String() string { return proto.CompactTextString(m) }
+func (*ServiceStatus) ProtoMessage() {}
+
+func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
+func (m *TCPSocketAction) String() string { return proto.CompactTextString(m) }
+func (*TCPSocketAction) ProtoMessage() {}
+
+func (m *Taint) Reset() { *m = Taint{} }
+func (m *Taint) String() string { return proto.CompactTextString(m) }
+func (*Taint) ProtoMessage() {}
+
+func (m *Toleration) Reset() { *m = Toleration{} }
+func (m *Toleration) String() string { return proto.CompactTextString(m) }
+func (*Toleration) ProtoMessage() {}
+
+func (m *Volume) Reset() { *m = Volume{} }
+func (m *Volume) String() string { return proto.CompactTextString(m) }
+func (*Volume) ProtoMessage() {}
+
+func (m *VolumeMount) Reset() { *m = VolumeMount{} }
+func (m *VolumeMount) String() string { return proto.CompactTextString(m) }
+func (*VolumeMount) ProtoMessage() {}
+
+func (m *VolumeSource) Reset() { *m = VolumeSource{} }
+func (m *VolumeSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeSource) ProtoMessage() {}
+
+func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
+func (m *VsphereVirtualDiskVolumeSource) String() string { return proto.CompactTextString(m) }
+func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
+
+func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
+func (m *WeightedPodAffinityTerm) String() string { return proto.CompactTextString(m) }
+func (*WeightedPodAffinityTerm) ProtoMessage() {}
+
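+// Note: the Reset/String/ProtoMessage triples above are the standard generated
+// boilerplate that makes each struct satisfy the proto.Message interface; the
+// init function below registers every type under its fully-qualified proto
+// name ("k8s.io.kubernetes.pkg.api.v1.<Type>") so it can be resolved by name
+// at runtime.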
+func init() {
+ proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource")
+ proto.RegisterType((*Affinity)(nil), "k8s.io.kubernetes.pkg.api.v1.Affinity")
+ proto.RegisterType((*AttachedVolume)(nil), "k8s.io.kubernetes.pkg.api.v1.AttachedVolume")
+ proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AzureFileVolumeSource")
+ proto.RegisterType((*Binding)(nil), "k8s.io.kubernetes.pkg.api.v1.Binding")
+ proto.RegisterType((*Capabilities)(nil), "k8s.io.kubernetes.pkg.api.v1.Capabilities")
+ proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.CephFSVolumeSource")
+ proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.CinderVolumeSource")
+ proto.RegisterType((*ComponentCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentCondition")
+ proto.RegisterType((*ComponentStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatus")
+ proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatusList")
+ proto.RegisterType((*ConfigMap)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMap")
+ proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapKeySelector")
+ proto.RegisterType((*ConfigMapList)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapList")
+ proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapVolumeSource")
+ proto.RegisterType((*Container)(nil), "k8s.io.kubernetes.pkg.api.v1.Container")
+ proto.RegisterType((*ContainerImage)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerImage")
+ proto.RegisterType((*ContainerPort)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerPort")
+ proto.RegisterType((*ContainerState)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerState")
+ proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateRunning")
+ proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateTerminated")
+ proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateWaiting")
+ proto.RegisterType((*ContainerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStatus")
+ proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.kubernetes.pkg.api.v1.DaemonEndpoint")
+ proto.RegisterType((*DeleteOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.DeleteOptions")
+ proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeFile")
+ proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeSource")
+ proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EmptyDirVolumeSource")
+ proto.RegisterType((*EndpointAddress)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointAddress")
+ proto.RegisterType((*EndpointPort)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointPort")
+ proto.RegisterType((*EndpointSubset)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointSubset")
+ proto.RegisterType((*Endpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.Endpoints")
+ proto.RegisterType((*EndpointsList)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointsList")
+ proto.RegisterType((*EnvVar)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVar")
+ proto.RegisterType((*EnvVarSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVarSource")
+ proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.api.v1.Event")
+ proto.RegisterType((*EventList)(nil), "k8s.io.kubernetes.pkg.api.v1.EventList")
+ proto.RegisterType((*EventSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EventSource")
+ proto.RegisterType((*ExecAction)(nil), "k8s.io.kubernetes.pkg.api.v1.ExecAction")
+ proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ExportOptions")
+ proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FCVolumeSource")
+ proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlexVolumeSource")
+ proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlockerVolumeSource")
+ proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GCEPersistentDiskVolumeSource")
+ proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GitRepoVolumeSource")
+ proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GlusterfsVolumeSource")
+ proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.kubernetes.pkg.api.v1.HTTPGetAction")
+ proto.RegisterType((*HTTPHeader)(nil), "k8s.io.kubernetes.pkg.api.v1.HTTPHeader")
+ proto.RegisterType((*Handler)(nil), "k8s.io.kubernetes.pkg.api.v1.Handler")
+ proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.HostPathVolumeSource")
+ proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ISCSIVolumeSource")
+ proto.RegisterType((*KeyToPath)(nil), "k8s.io.kubernetes.pkg.api.v1.KeyToPath")
+ proto.RegisterType((*Lifecycle)(nil), "k8s.io.kubernetes.pkg.api.v1.Lifecycle")
+ proto.RegisterType((*LimitRange)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRange")
+ proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeItem")
+ proto.RegisterType((*LimitRangeList)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeList")
+ proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeSpec")
+ proto.RegisterType((*List)(nil), "k8s.io.kubernetes.pkg.api.v1.List")
+ proto.RegisterType((*ListOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ListOptions")
+ proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.kubernetes.pkg.api.v1.LoadBalancerIngress")
+ proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus")
+ proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.LocalObjectReference")
+ proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.NFSVolumeSource")
+ proto.RegisterType((*Namespace)(nil), "k8s.io.kubernetes.pkg.api.v1.Namespace")
+ proto.RegisterType((*NamespaceList)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceList")
+ proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceSpec")
+ proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceStatus")
+ proto.RegisterType((*Node)(nil), "k8s.io.kubernetes.pkg.api.v1.Node")
+ proto.RegisterType((*NodeAddress)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeAddress")
+ proto.RegisterType((*NodeAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeAffinity")
+ proto.RegisterType((*NodeCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeCondition")
+ proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeDaemonEndpoints")
+ proto.RegisterType((*NodeList)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeList")
+ proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeProxyOptions")
+ proto.RegisterType((*NodeSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelector")
+ proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorRequirement")
+ proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorTerm")
+ proto.RegisterType((*NodeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSpec")
+ proto.RegisterType((*NodeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeStatus")
+ proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSystemInfo")
+ proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectFieldSelector")
+ proto.RegisterType((*ObjectMeta)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectMeta")
+ proto.RegisterType((*ObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectReference")
+ proto.RegisterType((*OwnerReference)(nil), "k8s.io.kubernetes.pkg.api.v1.OwnerReference")
+ proto.RegisterType((*PersistentVolume)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolume")
+ proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim")
+ proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimList")
+ proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimSpec")
+ proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimStatus")
+ proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimVolumeSource")
+ proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeList")
+ proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSource")
+ proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSpec")
+ proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeStatus")
+ proto.RegisterType((*Pod)(nil), "k8s.io.kubernetes.pkg.api.v1.Pod")
+ proto.RegisterType((*PodAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinity")
+ proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinityTerm")
+ proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAntiAffinity")
+ proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAttachOptions")
+ proto.RegisterType((*PodCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.PodCondition")
+ proto.RegisterType((*PodExecOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodExecOptions")
+ proto.RegisterType((*PodList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodList")
+ proto.RegisterType((*PodLogOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodLogOptions")
+ proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodProxyOptions")
+ proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSecurityContext")
+ proto.RegisterType((*PodSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSpec")
+ proto.RegisterType((*PodStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PodStatus")
+ proto.RegisterType((*PodStatusResult)(nil), "k8s.io.kubernetes.pkg.api.v1.PodStatusResult")
+ proto.RegisterType((*PodTemplate)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplate")
+ proto.RegisterType((*PodTemplateList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateList")
+ proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec")
+ proto.RegisterType((*Preconditions)(nil), "k8s.io.kubernetes.pkg.api.v1.Preconditions")
+ proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PreferredSchedulingTerm")
+ proto.RegisterType((*Probe)(nil), "k8s.io.kubernetes.pkg.api.v1.Probe")
+ proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.RBDVolumeSource")
+ proto.RegisterType((*RangeAllocation)(nil), "k8s.io.kubernetes.pkg.api.v1.RangeAllocation")
+ proto.RegisterType((*ReplicationController)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationController")
+ proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerList")
+ proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerSpec")
+ proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerStatus")
+ proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceFieldSelector")
+ proto.RegisterType((*ResourceQuota)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuota")
+ proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaList")
+ proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaSpec")
+ proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaStatus")
+ proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceRequirements")
+ proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.SELinuxOptions")
+ proto.RegisterType((*Secret)(nil), "k8s.io.kubernetes.pkg.api.v1.Secret")
+ proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretKeySelector")
+ proto.RegisterType((*SecretList)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretList")
+ proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretVolumeSource")
+ proto.RegisterType((*SecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.SecurityContext")
+ proto.RegisterType((*SerializedReference)(nil), "k8s.io.kubernetes.pkg.api.v1.SerializedReference")
+ proto.RegisterType((*Service)(nil), "k8s.io.kubernetes.pkg.api.v1.Service")
+ proto.RegisterType((*ServiceAccount)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceAccount")
+ proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceAccountList")
+ proto.RegisterType((*ServiceList)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceList")
+ proto.RegisterType((*ServicePort)(nil), "k8s.io.kubernetes.pkg.api.v1.ServicePort")
+ proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceProxyOptions")
+ proto.RegisterType((*ServiceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceSpec")
+ proto.RegisterType((*ServiceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceStatus")
+ proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.kubernetes.pkg.api.v1.TCPSocketAction")
+ proto.RegisterType((*Taint)(nil), "k8s.io.kubernetes.pkg.api.v1.Taint")
+ proto.RegisterType((*Toleration)(nil), "k8s.io.kubernetes.pkg.api.v1.Toleration")
+ proto.RegisterType((*Volume)(nil), "k8s.io.kubernetes.pkg.api.v1.Volume")
+ proto.RegisterType((*VolumeMount)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeMount")
+ proto.RegisterType((*VolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeSource")
+ proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource")
+ proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.WeightedPodAffinityTerm")
+}
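+// Each Marshal below allocates a buffer of exactly Size() bytes and delegates
+// to MarshalTo, which appends fields by hand: a protobuf tag
+// ((field number << 3) | wire type), then a varint length plus raw bytes for
+// strings and nested messages, or a varint value for integers and bools.
+// A minimal usage sketch (illustrative only, not part of the generated file):
+//
+//	src := &AWSElasticBlockStoreVolumeSource{VolumeID: "vol-123", FSType: "ext4"}
+//	raw, err := src.Marshal() // raw now holds the proto wire-format bytes
+//	if err != nil {
+//		// handle the marshalling error
+//	}
+//	_ = raw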
+func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
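+// The tag bytes below decode as: 0xa = field 1 (VolumeID, length-delimited),
+// 0x12 = field 2 (FSType), 0x18 = field 3 (Partition, varint), 0x20 = field 4
+// (ReadOnly, written as a single 0/1 byte).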
+func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID)))
+ i += copy(data[i:], m.VolumeID)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Partition))
+ data[i] = 0x20
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *Affinity) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
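+// Affinity's fields are optional pointers, so MarshalTo emits a field only
+// when it is non-nil: the tag, then the nested message's Size() as a varint
+// length prefix, then the nested bytes via its own MarshalTo.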
+func (m *Affinity) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.NodeAffinity != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NodeAffinity.Size()))
+ n1, err := m.NodeAffinity.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.PodAffinity != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size()))
+ n2, err := m.PodAffinity.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if m.PodAntiAffinity != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size()))
+ n3, err := m.PodAntiAffinity.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ return i, nil
+}
+
+func (m *AttachedVolume) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *AttachedVolume) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.DevicePath)))
+ i += copy(data[i:], m.DevicePath)
+ return i, nil
+}
+
+func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *AzureFileVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SecretName)))
+ i += copy(data[i:], m.SecretName)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ShareName)))
+ i += copy(data[i:], m.ShareName)
+ data[i] = 0x18
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *Binding) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
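+// ObjectMeta and Target are value (non-pointer) fields, so both are always
+// written as length-prefixed sub-messages (fields 1 and 2).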
+func (m *Binding) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n4, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Target.Size()))
+ n5, err := m.Target.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ return i, nil
+}
+
+func (m *Capabilities) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
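+// Repeated string fields (Add, Drop) get one tag per element; the inner loop
+// is an inlined base-128 varint encoding of each string's length.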
+func (m *Capabilities) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Add) > 0 {
+ for _, s := range m.Add {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Drop) > 0 {
+ for _, s := range m.Drop {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *CephFSVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Monitors) > 0 {
+ for _, s := range m.Monitors {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.User)))
+ i += copy(data[i:], m.User)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SecretFile)))
+ i += copy(data[i:], m.SecretFile)
+ if m.SecretRef != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size()))
+ n6, err := m.SecretRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ data[i] = 0x30
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *CinderVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CinderVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID)))
+ i += copy(data[i:], m.VolumeID)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ data[i] = 0x18
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *ComponentCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ComponentCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Error)))
+ i += copy(data[i:], m.Error)
+ return i, nil
+}
+
+func (m *ComponentStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ComponentStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n7, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ComponentStatusList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n8, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ConfigMap) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
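+// The Data map is encoded as repeated map-entry messages: for each key the
+// code writes field 2 of ConfigMap, a varint entry length (computed with
+// sovGenerated, the varint-size helper), then the key as field 1 and the
+// value as field 2 of the entry.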
+func (m *ConfigMap) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n9, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ if len(m.Data) > 0 {
+ for k := range m.Data {
+ data[i] = 0x12
+ i++
+ v := m.Data[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size()))
+ n10, err := m.LocalObjectReference.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ return i, nil
+}
+
+func (m *ConfigMapList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ConfigMapList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n11, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ConfigMapVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size()))
+ n12, err := m.LocalObjectReference.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Container) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
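+// Field numbers above 15 no longer fit in a single tag byte, so the trailing
+// booleans Stdin, StdinOnce and TTY (fields 16-18) use the two-byte varint
+// tags 0x80 0x1, 0x88 0x1 and 0x90 0x1 near the end of this function.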
+func (m *Container) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Image)))
+ i += copy(data[i:], m.Image)
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Args) > 0 {
+ for _, s := range m.Args {
+ data[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.WorkingDir)))
+ i += copy(data[i:], m.WorkingDir)
+ if len(m.Ports) > 0 {
+ for _, msg := range m.Ports {
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Env) > 0 {
+ for _, msg := range m.Env {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size()))
+ n13, err := m.Resources.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ if len(m.VolumeMounts) > 0 {
+ for _, msg := range m.VolumeMounts {
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.LivenessProbe != nil {
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size()))
+ n14, err := m.LivenessProbe.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ if m.ReadinessProbe != nil {
+ data[i] = 0x5a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size()))
+ n15, err := m.ReadinessProbe.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ }
+ if m.Lifecycle != nil {
+ data[i] = 0x62
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size()))
+ n16, err := m.Lifecycle.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ data[i] = 0x6a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePath)))
+ i += copy(data[i:], m.TerminationMessagePath)
+ data[i] = 0x72
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ImagePullPolicy)))
+ i += copy(data[i:], m.ImagePullPolicy)
+ if m.SecurityContext != nil {
+ data[i] = 0x7a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size()))
+ n17, err := m.SecurityContext.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ data[i] = 0x80
+ i++
+ data[i] = 0x1
+ i++
+ if m.Stdin {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x88
+ i++
+ data[i] = 0x1
+ i++
+ if m.StdinOnce {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x90
+ i++
+ data[i] = 0x1
+ i++
+ if m.TTY {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *ContainerImage) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerImage) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SizeBytes))
+ return i, nil
+}
+
+func (m *ContainerPort) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerPort) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.HostPort))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ContainerPort))
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Protocol)))
+ i += copy(data[i:], m.Protocol)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.HostIP)))
+ i += copy(data[i:], m.HostIP)
+ return i, nil
+}
+
+func (m *ContainerState) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerState) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Waiting != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size()))
+ n18, err := m.Waiting.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ if m.Running != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Running.Size()))
+ n19, err := m.Running.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ }
+ if m.Terminated != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size()))
+ n20, err := m.Terminated.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ return i, nil
+}
+
+func (m *ContainerStateRunning) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size()))
+ n21, err := m.StartedAt.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ return i, nil
+}
+
+func (m *ContainerStateTerminated) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ExitCode))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Signal))
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size()))
+ n22, err := m.StartedAt.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size()))
+ n23, err := m.FinishedAt.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID)))
+ i += copy(data[i:], m.ContainerID)
+ return i, nil
+}
+
+func (m *ContainerStateWaiting) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerStateWaiting) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ return i, nil
+}
+
+func (m *ContainerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ContainerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.State.Size()))
+ n24, err := m.State.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size()))
+ n25, err := m.LastTerminationState.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n25
+ data[i] = 0x20
+ i++
+ if m.Ready {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RestartCount))
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Image)))
+ i += copy(data[i:], m.Image)
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ImageID)))
+ i += copy(data[i:], m.ImageID)
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID)))
+ i += copy(data[i:], m.ContainerID)
+ return i, nil
+}
+
+func (m *DaemonEndpoint) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DaemonEndpoint) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Port))
+ return i, nil
+}
+
+func (m *DeleteOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
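+// Optional scalars are pointer fields (GracePeriodSeconds, OrphanDependents)
+// and are written only when non-nil; Preconditions is an optional nested
+// message handled the same way.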
+func (m *DeleteOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.GracePeriodSeconds != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds))
+ }
+ if m.Preconditions != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size()))
+ n26, err := m.Preconditions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ }
+ if m.OrphanDependents != nil {
+ data[i] = 0x18
+ i++
+ if *m.OrphanDependents {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *DownwardAPIVolumeFile) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ if m.FieldRef != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size()))
+ n27, err := m.FieldRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ }
+ if m.ResourceFieldRef != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size()))
+ n28, err := m.ResourceFieldRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n28
+ }
+ return i, nil
+}
+
+func (m *DownwardAPIVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *EmptyDirVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EmptyDirVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Medium)))
+ i += copy(data[i:], m.Medium)
+ return i, nil
+}
+
+func (m *EndpointAddress) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EndpointAddress) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.IP)))
+ i += copy(data[i:], m.IP)
+ if m.TargetRef != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size()))
+ n29, err := m.TargetRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n29
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Hostname)))
+ i += copy(data[i:], m.Hostname)
+ return i, nil
+}
+
+func (m *EndpointPort) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EndpointPort) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Port))
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Protocol)))
+ i += copy(data[i:], m.Protocol)
+ return i, nil
+}
+
+func (m *EndpointSubset) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EndpointSubset) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Addresses) > 0 {
+ for _, msg := range m.Addresses {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.NotReadyAddresses) > 0 {
+ for _, msg := range m.NotReadyAddresses {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Ports) > 0 {
+ for _, msg := range m.Ports {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Endpoints) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Endpoints) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n30, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n30
+ if len(m.Subsets) > 0 {
+ for _, msg := range m.Subsets {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *EndpointsList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EndpointsList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n31, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n31
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *EnvVar) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EnvVar) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Value)))
+ i += copy(data[i:], m.Value)
+ if m.ValueFrom != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size()))
+ n32, err := m.ValueFrom.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n32
+ }
+ return i, nil
+}
+
+func (m *EnvVarSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EnvVarSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.FieldRef != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size()))
+ n33, err := m.FieldRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n33
+ }
+ if m.ResourceFieldRef != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size()))
+ n34, err := m.ResourceFieldRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n34
+ }
+ if m.ConfigMapKeyRef != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size()))
+ n35, err := m.ConfigMapKeyRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n35
+ }
+ if m.SecretKeyRef != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size()))
+ n36, err := m.SecretKeyRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n36
+ }
+ return i, nil
+}
+
+func (m *Event) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Event) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n37, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n37
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size()))
+ n38, err := m.InvolvedObject.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n38
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Source.Size()))
+ n39, err := m.Source.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n39
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size()))
+ n40, err := m.FirstTimestamp.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n40
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size()))
+ n41, err := m.LastTimestamp.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n41
+ data[i] = 0x40
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Count))
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ return i, nil
+}
+
+func (m *EventList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EventList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n42, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n42
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *EventSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *EventSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Component)))
+ i += copy(data[i:], m.Component)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Host)))
+ i += copy(data[i:], m.Host)
+ return i, nil
+}
+
+func (m *ExecAction) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ExecAction) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ExportOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ExportOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.Export {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x10
+ i++
+ if m.Exact {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *FCVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *FCVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.TargetWWNs) > 0 {
+ for _, s := range m.TargetWWNs {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if m.Lun != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Lun))
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ data[i] = 0x20
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *FlexVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Driver)))
+ i += copy(data[i:], m.Driver)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ if m.SecretRef != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size()))
+ n43, err := m.SecretRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n43
+ }
+ data[i] = 0x20
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if len(m.Options) > 0 {
+ for k := range m.Options {
+ data[i] = 0x2a
+ i++
+ v := m.Options[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *FlockerVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *FlockerVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.DatasetName)))
+ i += copy(data[i:], m.DatasetName)
+ return i, nil
+}
+
+func (m *GCEPersistentDiskVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GCEPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.PDName)))
+ i += copy(data[i:], m.PDName)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Partition))
+ data[i] = 0x20
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *GitRepoVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GitRepoVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Repository)))
+ i += copy(data[i:], m.Repository)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Revision)))
+ i += copy(data[i:], m.Revision)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Directory)))
+ i += copy(data[i:], m.Directory)
+ return i, nil
+}
+
+func (m *GlusterfsVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *GlusterfsVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.EndpointsName)))
+ i += copy(data[i:], m.EndpointsName)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ data[i] = 0x18
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *HTTPGetAction) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Port.Size()))
+ n44, err := m.Port.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n44
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Host)))
+ i += copy(data[i:], m.Host)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Scheme)))
+ i += copy(data[i:], m.Scheme)
+ if len(m.HTTPHeaders) > 0 {
+ for _, msg := range m.HTTPHeaders {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *HTTPHeader) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HTTPHeader) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Value)))
+ i += copy(data[i:], m.Value)
+ return i, nil
+}
+
+func (m *Handler) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Handler) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Exec != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Exec.Size()))
+ n45, err := m.Exec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n45
+ }
+ if m.HTTPGet != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size()))
+ n46, err := m.HTTPGet.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n46
+ }
+ if m.TCPSocket != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size()))
+ n47, err := m.TCPSocket.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n47
+ }
+ return i, nil
+}
+
+func (m *HostPathVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HostPathVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ return i, nil
+}
+
+func (m *ISCSIVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.TargetPortal)))
+ i += copy(data[i:], m.TargetPortal)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.IQN)))
+ i += copy(data[i:], m.IQN)
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Lun))
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ISCSIInterface)))
+ i += copy(data[i:], m.ISCSIInterface)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ data[i] = 0x30
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *KeyToPath) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *KeyToPath) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ return i, nil
+}
+
+func (m *Lifecycle) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Lifecycle) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.PostStart != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size()))
+ n48, err := m.PostStart.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n48
+ }
+ if m.PreStop != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size()))
+ n49, err := m.PreStop.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n49
+ }
+ return i, nil
+}
+
+func (m *LimitRange) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LimitRange) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n50, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n50
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n51, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n51
+ return i, nil
+}
+
+func (m *LimitRangeItem) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ if len(m.Max) > 0 {
+ for k := range m.Max {
+ data[i] = 0x12
+ i++
+ v := m.Max[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n52, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n52
+ }
+ }
+ if len(m.Min) > 0 {
+ for k := range m.Min {
+ data[i] = 0x1a
+ i++
+ v := m.Min[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n53, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n53
+ }
+ }
+ if len(m.Default) > 0 {
+ for k := range m.Default {
+ data[i] = 0x22
+ i++
+ v := m.Default[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n54, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n54
+ }
+ }
+ if len(m.DefaultRequest) > 0 {
+ for k := range m.DefaultRequest {
+ data[i] = 0x2a
+ i++
+ v := m.DefaultRequest[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n55, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n55
+ }
+ }
+ if len(m.MaxLimitRequestRatio) > 0 {
+ for k := range m.MaxLimitRequestRatio {
+ data[i] = 0x32
+ i++
+ v := m.MaxLimitRequestRatio[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n56, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n56
+ }
+ }
+ return i, nil
+}
+
+func (m *LimitRangeList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LimitRangeList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n57, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n57
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LimitRangeSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LimitRangeSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Limits) > 0 {
+ for _, msg := range m.Limits {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *List) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *List) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n58, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n58
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ListOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector)))
+ i += copy(data[i:], m.LabelSelector)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector)))
+ i += copy(data[i:], m.FieldSelector)
+ data[i] = 0x18
+ i++
+ if m.Watch {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion)))
+ i += copy(data[i:], m.ResourceVersion)
+ if m.TimeoutSeconds != nil {
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds))
+ }
+ return i, nil
+}
+
+func (m *LoadBalancerIngress) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LoadBalancerIngress) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.IP)))
+ i += copy(data[i:], m.IP)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Hostname)))
+ i += copy(data[i:], m.Hostname)
+ return i, nil
+}
+
+func (m *LoadBalancerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LoadBalancerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Ingress) > 0 {
+ for _, msg := range m.Ingress {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LocalObjectReference) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LocalObjectReference) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ return i, nil
+}
+
+func (m *NFSVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NFSVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Server)))
+ i += copy(data[i:], m.Server)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ data[i] = 0x18
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *Namespace) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Namespace) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n59, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n59
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n60, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n60
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n61, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n61
+ return i, nil
+}
+
+func (m *NamespaceList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NamespaceList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n62, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n62
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NamespaceSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NamespaceSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Finalizers) > 0 {
+ for _, s := range m.Finalizers {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *NamespaceStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NamespaceStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Phase)))
+ i += copy(data[i:], m.Phase)
+ return i, nil
+}
+
+func (m *Node) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Node) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n63, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n63
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n64, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n64
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n65, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n65
+ return i, nil
+}
+
+func (m *NodeAddress) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeAddress) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Address)))
+ i += copy(data[i:], m.Address)
+ return i, nil
+}
+
+func (m *NodeAffinity) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeAffinity) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size()))
+ n66, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n66
+ }
+ if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size()))
+ n67, err := m.LastHeartbeatTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n67
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size()))
+ n68, err := m.LastTransitionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n68
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ return i, nil
+}
+
+func (m *NodeDaemonEndpoints) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size()))
+ n69, err := m.KubeletEndpoint.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n69
+ return i, nil
+}
+
+func (m *NodeList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n70, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n70
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeProxyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ return i, nil
+}
+
+func (m *NodeSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.NodeSelectorTerms) > 0 {
+ for _, msg := range m.NodeSelectorTerms {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeSelectorRequirement) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeSelectorRequirement) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Operator)))
+ i += copy(data[i:], m.Operator)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeSelectorTerm) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchExpressions) > 0 {
+ for _, msg := range m.MatchExpressions {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.PodCIDR)))
+ i += copy(data[i:], m.PodCIDR)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ExternalID)))
+ i += copy(data[i:], m.ExternalID)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ProviderID)))
+ i += copy(data[i:], m.ProviderID)
+ data[i] = 0x20
+ i++
+ if m.Unschedulable {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *NodeStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Capacity) > 0 {
+ for k := range m.Capacity {
+ data[i] = 0xa
+ i++
+ v := m.Capacity[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n71, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n71
+ }
+ }
+ if len(m.Allocatable) > 0 {
+ for k := range m.Allocatable {
+ data[i] = 0x12
+ i++
+ v := m.Allocatable[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n72, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n72
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Phase)))
+ i += copy(data[i:], m.Phase)
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Addresses) > 0 {
+ for _, msg := range m.Addresses {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size()))
+ n73, err := m.DaemonEndpoints.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n73
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size()))
+ n74, err := m.NodeInfo.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n74
+ if len(m.Images) > 0 {
+ for _, msg := range m.Images {
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.VolumesInUse) > 0 {
+ for _, s := range m.VolumesInUse {
+ data[i] = 0x4a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.VolumesAttached) > 0 {
+ for _, msg := range m.VolumesAttached {
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NodeSystemInfo) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NodeSystemInfo) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.MachineID)))
+ i += copy(data[i:], m.MachineID)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SystemUUID)))
+ i += copy(data[i:], m.SystemUUID)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.BootID)))
+ i += copy(data[i:], m.BootID)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.KernelVersion)))
+ i += copy(data[i:], m.KernelVersion)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.OSImage)))
+ i += copy(data[i:], m.OSImage)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContainerRuntimeVersion)))
+ i += copy(data[i:], m.ContainerRuntimeVersion)
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.KubeletVersion)))
+ i += copy(data[i:], m.KubeletVersion)
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.KubeProxyVersion)))
+ i += copy(data[i:], m.KubeProxyVersion)
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.OperatingSystem)))
+ i += copy(data[i:], m.OperatingSystem)
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Architecture)))
+ i += copy(data[i:], m.Architecture)
+ return i, nil
+}
+
+func (m *ObjectFieldSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ObjectFieldSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath)))
+ i += copy(data[i:], m.FieldPath)
+ return i, nil
+}
+
+func (m *ObjectMeta) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ObjectMeta) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName)))
+ i += copy(data[i:], m.GenerateName)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Namespace)))
+ i += copy(data[i:], m.Namespace)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink)))
+ i += copy(data[i:], m.SelfLink)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.UID)))
+ i += copy(data[i:], m.UID)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion)))
+ i += copy(data[i:], m.ResourceVersion)
+ data[i] = 0x38
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Generation))
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size()))
+ n75, err := m.CreationTimestamp.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n75
+ if m.DeletionTimestamp != nil {
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size()))
+ n76, err := m.DeletionTimestamp.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n76
+ }
+ if m.DeletionGracePeriodSeconds != nil {
+ data[i] = 0x50
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds))
+ }
+ if len(m.Labels) > 0 {
+ for k := range m.Labels {
+ data[i] = 0x5a
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.Annotations) > 0 {
+ for k := range m.Annotations {
+ data[i] = 0x62
+ i++
+ v := m.Annotations[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.OwnerReferences) > 0 {
+ for _, msg := range m.OwnerReferences {
+ data[i] = 0x6a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Finalizers) > 0 {
+ for _, s := range m.Finalizers {
+ data[i] = 0x72
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ObjectReference) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ObjectReference) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Namespace)))
+ i += copy(data[i:], m.Namespace)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.UID)))
+ i += copy(data[i:], m.UID)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion)))
+ i += copy(data[i:], m.ResourceVersion)
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath)))
+ i += copy(data[i:], m.FieldPath)
+ return i, nil
+}
+
+func (m *OwnerReference) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *OwnerReference) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.UID)))
+ i += copy(data[i:], m.UID)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ if m.Controller != nil {
+ data[i] = 0x30
+ i++
+ if *m.Controller {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *PersistentVolume) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolume) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n77, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n77
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n78, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n78
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n79, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n79
+ return i, nil
+}
+
+func (m *PersistentVolumeClaim) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n80, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n80
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n81, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n81
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n82, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n82
+ return i, nil
+}
+
+func (m *PersistentVolumeClaimList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n83, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n83
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PersistentVolumeClaimSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.AccessModes) > 0 {
+ for _, s := range m.AccessModes {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size()))
+ n84, err := m.Resources.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n84
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName)))
+ i += copy(data[i:], m.VolumeName)
+ if m.Selector != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n85, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n85
+ }
+ return i, nil
+}
+
+func (m *PersistentVolumeClaimStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Phase)))
+ i += copy(data[i:], m.Phase)
+ if len(m.AccessModes) > 0 {
+ for _, s := range m.AccessModes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Capacity) > 0 {
+ for k := range m.Capacity {
+ data[i] = 0x1a
+ i++
+ v := m.Capacity[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n86, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n86
+ }
+ }
+ return i, nil
+}
+
+func (m *PersistentVolumeClaimVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeClaimVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ClaimName)))
+ i += copy(data[i:], m.ClaimName)
+ data[i] = 0x10
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *PersistentVolumeList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n87, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n87
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PersistentVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.GCEPersistentDisk != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size()))
+ n88, err := m.GCEPersistentDisk.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n88
+ }
+ if m.AWSElasticBlockStore != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size()))
+ n89, err := m.AWSElasticBlockStore.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n89
+ }
+ if m.HostPath != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size()))
+ n90, err := m.HostPath.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n90
+ }
+ if m.Glusterfs != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size()))
+ n91, err := m.Glusterfs.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n91
+ }
+ if m.NFS != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NFS.Size()))
+ n92, err := m.NFS.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n92
+ }
+ if m.RBD != nil {
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RBD.Size()))
+ n93, err := m.RBD.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n93
+ }
+ if m.ISCSI != nil {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size()))
+ n94, err := m.ISCSI.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n94
+ }
+ if m.Cinder != nil {
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size()))
+ n95, err := m.Cinder.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n95
+ }
+ if m.CephFS != nil {
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size()))
+ n96, err := m.CephFS.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n96
+ }
+ if m.FC != nil {
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FC.Size()))
+ n97, err := m.FC.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n97
+ }
+ if m.Flocker != nil {
+ data[i] = 0x5a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size()))
+ n98, err := m.Flocker.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n98
+ }
+ if m.FlexVolume != nil {
+ data[i] = 0x62
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size()))
+ n99, err := m.FlexVolume.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n99
+ }
+ if m.AzureFile != nil {
+ data[i] = 0x6a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size()))
+ n100, err := m.AzureFile.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n100
+ }
+ if m.VsphereVolume != nil {
+ data[i] = 0x72
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size()))
+ n101, err := m.VsphereVolume.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n101
+ }
+ return i, nil
+}
+
+func (m *PersistentVolumeSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Capacity) > 0 {
+ for k := range m.Capacity {
+ data[i] = 0xa
+ i++
+ v := m.Capacity[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n102, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n102
+ }
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size()))
+ n103, err := m.PersistentVolumeSource.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n103
+ if len(m.AccessModes) > 0 {
+ for _, s := range m.AccessModes {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if m.ClaimRef != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size()))
+ n104, err := m.ClaimRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n104
+ }
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy)))
+ i += copy(data[i:], m.PersistentVolumeReclaimPolicy)
+ return i, nil
+}
+
+func (m *PersistentVolumeStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PersistentVolumeStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Phase)))
+ i += copy(data[i:], m.Phase)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ return i, nil
+}
+
+func (m *Pod) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Pod) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n105, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n105
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n106, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n106
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n107, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n107
+ return i, nil
+}
+
+func (m *PodAffinity) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodAffinity) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodAffinityTerm) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.LabelSelector != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size()))
+ n108, err := m.LabelSelector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n108
+ }
+ if len(m.Namespaces) > 0 {
+ for _, s := range m.Namespaces {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey)))
+ i += copy(data[i:], m.TopologyKey)
+ return i, nil
+}
+
+func (m *PodAntiAffinity) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodAttachOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.Stdin {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x10
+ i++
+ if m.Stdout {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x18
+ i++
+ if m.Stderr {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x20
+ i++
+ if m.TTY {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Container)))
+ i += copy(data[i:], m.Container)
+ return i, nil
+}
+
+func (m *PodCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size()))
+ n109, err := m.LastProbeTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n109
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size()))
+ n110, err := m.LastTransitionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n110
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ return i, nil
+}
+
+func (m *PodExecOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodExecOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.Stdin {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x10
+ i++
+ if m.Stdout {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x18
+ i++
+ if m.Stderr {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x20
+ i++
+ if m.TTY {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Container)))
+ i += copy(data[i:], m.Container)
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ data[i] = 0x32
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *PodList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n111, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n111
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodLogOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodLogOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Container)))
+ i += copy(data[i:], m.Container)
+ data[i] = 0x10
+ i++
+ if m.Follow {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x18
+ i++
+ if m.Previous {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if m.SinceSeconds != nil {
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds))
+ }
+ if m.SinceTime != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size()))
+ n112, err := m.SinceTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n112
+ }
+ data[i] = 0x30
+ i++
+ if m.Timestamps {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if m.TailLines != nil {
+ data[i] = 0x38
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.TailLines))
+ }
+ if m.LimitBytes != nil {
+ data[i] = 0x40
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes))
+ }
+ return i, nil
+}
+
+func (m *PodProxyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodProxyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ return i, nil
+}
+
+func (m *PodSecurityContext) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.SELinuxOptions != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size()))
+ n113, err := m.SELinuxOptions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n113
+ }
+ if m.RunAsUser != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser))
+ }
+ if m.RunAsNonRoot != nil {
+ data[i] = 0x18
+ i++
+ if *m.RunAsNonRoot {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if len(m.SupplementalGroups) > 0 {
+ for _, num := range m.SupplementalGroups {
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(num))
+ }
+ }
+ if m.FSGroup != nil {
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.FSGroup))
+ }
+ return i, nil
+}
+
+func (m *PodSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
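+// MarshalTo for PodSpec covers two additional encoding shapes: map fields
+// (NodeSelector) are written as nested entries with the key as field 1 and
+// the value as field 2, each entry prefixed by its byte length, while fields
+// numbered above 15 (Hostname, Subdomain) require a two-byte varint tag,
+// emitted here as the literal byte pairs 0x82 0x1 and 0x8a 0x1.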
+func (m *PodSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Volumes) > 0 {
+ for _, msg := range m.Volumes {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Containers) > 0 {
+ for _, msg := range m.Containers {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.RestartPolicy)))
+ i += copy(data[i:], m.RestartPolicy)
+ if m.TerminationGracePeriodSeconds != nil {
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.TerminationGracePeriodSeconds))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds))
+ }
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.DNSPolicy)))
+ i += copy(data[i:], m.DNSPolicy)
+ if len(m.NodeSelector) > 0 {
+ for k := range m.NodeSelector {
+ data[i] = 0x3a
+ i++
+ v := m.NodeSelector[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccountName)))
+ i += copy(data[i:], m.ServiceAccountName)
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.DeprecatedServiceAccount)))
+ i += copy(data[i:], m.DeprecatedServiceAccount)
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.NodeName)))
+ i += copy(data[i:], m.NodeName)
+ data[i] = 0x58
+ i++
+ if m.HostNetwork {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x60
+ i++
+ if m.HostPID {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x68
+ i++
+ if m.HostIPC {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if m.SecurityContext != nil {
+ data[i] = 0x72
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size()))
+ n114, err := m.SecurityContext.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n114
+ }
+ if len(m.ImagePullSecrets) > 0 {
+ for _, msg := range m.ImagePullSecrets {
+ data[i] = 0x7a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x82
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Hostname)))
+ i += copy(data[i:], m.Hostname)
+ data[i] = 0x8a
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain)))
+ i += copy(data[i:], m.Subdomain)
+ return i, nil
+}
+
+func (m *PodStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Phase)))
+ i += copy(data[i:], m.Phase)
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.HostIP)))
+ i += copy(data[i:], m.HostIP)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.PodIP)))
+ i += copy(data[i:], m.PodIP)
+ if m.StartTime != nil {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size()))
+ n115, err := m.StartTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n115
+ }
+ if len(m.ContainerStatuses) > 0 {
+ for _, msg := range m.ContainerStatuses {
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodStatusResult) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodStatusResult) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n116, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n116
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n117, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n117
+ return i, nil
+}
+
+func (m *PodTemplate) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodTemplate) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n118, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n118
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n119, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n119
+ return i, nil
+}
+
+func (m *PodTemplateList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodTemplateList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n120, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n120
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodTemplateSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n121, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n121
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n122, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n122
+ return i, nil
+}
+
+func (m *Preconditions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Preconditions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.UID != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(*m.UID)))
+ i += copy(data[i:], *m.UID)
+ }
+ return i, nil
+}
+
+func (m *PreferredSchedulingTerm) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Weight))
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Preference.Size()))
+ n123, err := m.Preference.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n123
+ return i, nil
+}
+
+func (m *Probe) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Probe) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Handler.Size()))
+ n124, err := m.Handler.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n124
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TimeoutSeconds))
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PeriodSeconds))
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SuccessThreshold))
+ data[i] = 0x30
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FailureThreshold))
+ return i, nil
+}
+
+func (m *RBDVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.CephMonitors) > 0 {
+ for _, s := range m.CephMonitors {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.RBDImage)))
+ i += copy(data[i:], m.RBDImage)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.RBDPool)))
+ i += copy(data[i:], m.RBDPool)
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.RadosUser)))
+ i += copy(data[i:], m.RadosUser)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Keyring)))
+ i += copy(data[i:], m.Keyring)
+ if m.SecretRef != nil {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size()))
+ n125, err := m.SecretRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n125
+ }
+ data[i] = 0x40
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *RangeAllocation) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RangeAllocation) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n126, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n126
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Range)))
+ i += copy(data[i:], m.Range)
+ if m.Data != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ return i, nil
+}
+
+func (m *ReplicationController) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicationController) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n127, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n127
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n128, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n128
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n129, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n129
+ return i, nil
+}
+
+func (m *ReplicationControllerList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n130, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n130
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ReplicationControllerSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Replicas))
+ }
+ if len(m.Selector) > 0 {
+ for k := range m.Selector {
+ data[i] = 0x12
+ i++
+ v := m.Selector[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if m.Template != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n131, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n131
+ }
+ return i, nil
+}
+
+func (m *ReplicationControllerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicationControllerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration))
+ return i, nil
+}
+
+func (m *ResourceFieldSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContainerName)))
+ i += copy(data[i:], m.ContainerName)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Resource)))
+ i += copy(data[i:], m.Resource)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size()))
+ n132, err := m.Divisor.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n132
+ return i, nil
+}
+
+func (m *ResourceQuota) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceQuota) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n133, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n133
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n134, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n134
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n135, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n135
+ return i, nil
+}
+
+func (m *ResourceQuotaList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n136, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n136
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ResourceQuotaSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hard) > 0 {
+ for k := range m.Hard {
+ data[i] = 0xa
+ i++
+ v := m.Hard[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n137, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n137
+ }
+ }
+ if len(m.Scopes) > 0 {
+ for _, s := range m.Scopes {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ResourceQuotaStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hard) > 0 {
+ for k := range m.Hard {
+ data[i] = 0xa
+ i++
+ v := m.Hard[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n138, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n138
+ }
+ }
+ if len(m.Used) > 0 {
+ for k := range m.Used {
+ data[i] = 0x12
+ i++
+ v := m.Used[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n139, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n139
+ }
+ }
+ return i, nil
+}
+
+func (m *ResourceRequirements) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Limits) > 0 {
+ for k := range m.Limits {
+ data[i] = 0xa
+ i++
+ v := m.Limits[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n140, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n140
+ }
+ }
+ if len(m.Requests) > 0 {
+ for k := range m.Requests {
+ data[i] = 0x12
+ i++
+ v := m.Requests[k]
+ msgSize := (&v).Size()
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+ n141, err := (&v).MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n141
+ }
+ }
+ return i, nil
+}
+
+func (m *SELinuxOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.User)))
+ i += copy(data[i:], m.User)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Role)))
+ i += copy(data[i:], m.Role)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Level)))
+ i += copy(data[i:], m.Level)
+ return i, nil
+}
+
+func (m *Secret) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Secret) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n142, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n142
+ if len(m.Data) > 0 {
+ for k := range m.Data {
+ data[i] = 0x12
+ i++
+ v := m.Data[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ if len(m.StringData) > 0 {
+ for k := range m.StringData {
+ data[i] = 0x22
+ i++
+ v := m.StringData[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *SecretKeySelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size()))
+ n143, err := m.LocalObjectReference.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n143
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ return i, nil
+}
+
+func (m *SecretList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SecretList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n144, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n144
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *SecretVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SecretName)))
+ i += copy(data[i:], m.SecretName)
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *SecurityContext) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SecurityContext) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Capabilities != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size()))
+ n145, err := m.Capabilities.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n145
+ }
+ if m.Privileged != nil {
+ data[i] = 0x10
+ i++
+ if *m.Privileged {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.SELinuxOptions != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size()))
+ n146, err := m.SELinuxOptions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n146
+ }
+ if m.RunAsUser != nil {
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser))
+ }
+ if m.RunAsNonRoot != nil {
+ data[i] = 0x28
+ i++
+ if *m.RunAsNonRoot {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ if m.ReadOnlyRootFilesystem != nil {
+ data[i] = 0x30
+ i++
+ if *m.ReadOnlyRootFilesystem {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *SerializedReference) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SerializedReference) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Reference.Size()))
+ n147, err := m.Reference.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n147
+ return i, nil
+}
+
+func (m *Service) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Service) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n148, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n148
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n149, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n149
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n150, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n150
+ return i, nil
+}
+
+func (m *ServiceAccount) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceAccount) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n151, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n151
+ if len(m.Secrets) > 0 {
+ for _, msg := range m.Secrets {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.ImagePullSecrets) > 0 {
+ for _, msg := range m.ImagePullSecrets {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ServiceAccountList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n152, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n152
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ServiceList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n153, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n153
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ServicePort) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServicePort) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Protocol)))
+ i += copy(data[i:], m.Protocol)
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Port))
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size()))
+ n154, err := m.TargetPort.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n154
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NodePort))
+ return i, nil
+}
+
+func (m *ServiceProxyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceProxyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ return i, nil
+}
+
+func (m *ServiceSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for _, msg := range m.Ports {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Selector) > 0 {
+ for k := range m.Selector {
+ data[i] = 0x12
+ i++
+ v := m.Selector[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ClusterIP)))
+ i += copy(data[i:], m.ClusterIP)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ if len(m.ExternalIPs) > 0 {
+ for _, s := range m.ExternalIPs {
+ data[i] = 0x2a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.DeprecatedPublicIPs) > 0 {
+ for _, s := range m.DeprecatedPublicIPs {
+ data[i] = 0x32
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SessionAffinity)))
+ i += copy(data[i:], m.SessionAffinity)
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.LoadBalancerIP)))
+ i += copy(data[i:], m.LoadBalancerIP)
+ if len(m.LoadBalancerSourceRanges) > 0 {
+ for _, s := range m.LoadBalancerSourceRanges {
+ data[i] = 0x4a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ServiceStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ServiceStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size()))
+ n155, err := m.LoadBalancer.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n155
+ return i, nil
+}
+
+func (m *TCPSocketAction) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Port.Size()))
+ n156, err := m.Port.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n156
+ return i, nil
+}
+
+func (m *Taint) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Taint) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Value)))
+ i += copy(data[i:], m.Value)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Effect)))
+ i += copy(data[i:], m.Effect)
+ return i, nil
+}
+
+func (m *Toleration) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Toleration) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Operator)))
+ i += copy(data[i:], m.Operator)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Value)))
+ i += copy(data[i:], m.Value)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Effect)))
+ i += copy(data[i:], m.Effect)
+ return i, nil
+}
+
+func (m *Volume) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Volume) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size()))
+ n157, err := m.VolumeSource.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n157
+ return i, nil
+}
+
+func (m *VolumeMount) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *VolumeMount) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x10
+ i++
+ if m.ReadOnly {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.MountPath)))
+ i += copy(data[i:], m.MountPath)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SubPath)))
+ i += copy(data[i:], m.SubPath)
+ return i, nil
+}
+
+func (m *VolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
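+// MarshalTo writes each non-nil volume source as a length-delimited nested
+// message and skips nil pointers entirely; fields numbered above 15
+// (DownwardAPI onward) again use two-byte tags.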
+func (m *VolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.HostPath != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size()))
+ n158, err := m.HostPath.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n158
+ }
+ if m.EmptyDir != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size()))
+ n159, err := m.EmptyDir.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n159
+ }
+ if m.GCEPersistentDisk != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size()))
+ n160, err := m.GCEPersistentDisk.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n160
+ }
+ if m.AWSElasticBlockStore != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size()))
+ n161, err := m.AWSElasticBlockStore.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n161
+ }
+ if m.GitRepo != nil {
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size()))
+ n162, err := m.GitRepo.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n162
+ }
+ if m.Secret != nil {
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Secret.Size()))
+ n163, err := m.Secret.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n163
+ }
+ if m.NFS != nil {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NFS.Size()))
+ n164, err := m.NFS.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n164
+ }
+ if m.ISCSI != nil {
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size()))
+ n165, err := m.ISCSI.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n165
+ }
+ if m.Glusterfs != nil {
+ data[i] = 0x4a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size()))
+ n166, err := m.Glusterfs.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n166
+ }
+ if m.PersistentVolumeClaim != nil {
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size()))
+ n167, err := m.PersistentVolumeClaim.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n167
+ }
+ if m.RBD != nil {
+ data[i] = 0x5a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RBD.Size()))
+ n168, err := m.RBD.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n168
+ }
+ if m.FlexVolume != nil {
+ data[i] = 0x62
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size()))
+ n169, err := m.FlexVolume.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n169
+ }
+ if m.Cinder != nil {
+ data[i] = 0x6a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size()))
+ n170, err := m.Cinder.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n170
+ }
+ if m.CephFS != nil {
+ data[i] = 0x72
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size()))
+ n171, err := m.CephFS.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n171
+ }
+ if m.Flocker != nil {
+ data[i] = 0x7a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size()))
+ n172, err := m.Flocker.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n172
+ }
+ if m.DownwardAPI != nil {
+ data[i] = 0x82
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size()))
+ n173, err := m.DownwardAPI.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n173
+ }
+ if m.FC != nil {
+ data[i] = 0x8a
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FC.Size()))
+ n174, err := m.FC.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n174
+ }
+ if m.AzureFile != nil {
+ data[i] = 0x92
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size()))
+ n175, err := m.AzureFile.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n175
+ }
+ if m.ConfigMap != nil {
+ data[i] = 0x9a
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size()))
+ n176, err := m.ConfigMap.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n176
+ }
+ if m.VsphereVolume != nil {
+ data[i] = 0xa2
+ i++
+ data[i] = 0x1
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size()))
+ n177, err := m.VsphereVolume.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n177
+ }
+ return i, nil
+}
+
+func (m *VsphereVirtualDiskVolumeSource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *VsphereVirtualDiskVolumeSource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.VolumePath)))
+ i += copy(data[i:], m.VolumePath)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FSType)))
+ i += copy(data[i:], m.FSType)
+ return i, nil
+}
+
+func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Weight))
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size()))
+ n178, err := m.PodAffinityTerm.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n178
+ return i, nil
+}
+
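+// encodeFixed64Generated writes v into data at offset as 8 little-endian
+// bytes and returns the offset just past them.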
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
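+// encodeFixed32Generated writes v into data at offset as 4 little-endian
+// bytes and returns the offset just past them.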
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
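+// encodeVarintGenerated writes v into data at offset using protobuf base-128
+// varint encoding (7 value bits per byte, high bit set on all but the last
+// byte) and returns the offset just past the encoded value.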
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
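+// The Size methods below compute the exact byte count the corresponding
+// MarshalTo will write: for each field, the tag byte(s), the length prefix
+// where the field is length-delimited (sovGenerated reports how many bytes
+// the varint-encoded length takes), and the payload itself. Marshal uses
+// this to allocate an output buffer of exactly the right size.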
+func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.VolumeID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Partition))
+ n += 2
+ return n
+}
+
+func (m *Affinity) Size() (n int) {
+ var l int
+ _ = l
+ if m.NodeAffinity != nil {
+ l = m.NodeAffinity.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PodAffinity != nil {
+ l = m.PodAffinity.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PodAntiAffinity != nil {
+ l = m.PodAntiAffinity.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AttachedVolume) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DevicePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *AzureFileVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ShareName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *Binding) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Target.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Capabilities) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Add) > 0 {
+ for _, s := range m.Add {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Drop) > 0 {
+ for _, s := range m.Drop {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CephFSVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Monitors) > 0 {
+ for _, s := range m.Monitors {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.User)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SecretFile)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ return n
+}
+
+func (m *CinderVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.VolumeID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *ComponentCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Error)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ComponentStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ComponentStatusList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ConfigMap) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Data) > 0 {
+ for k, v := range m.Data {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ConfigMapKeySelector) Size() (n int) {
+ var l int
+ _ = l
+ l = m.LocalObjectReference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ConfigMapList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ConfigMapVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = m.LocalObjectReference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Container) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Image)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Args) > 0 {
+ for _, s := range m.Args {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.WorkingDir)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Resources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeMounts) > 0 {
+ for _, e := range m.VolumeMounts {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.LivenessProbe != nil {
+ l = m.LivenessProbe.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ReadinessProbe != nil {
+ l = m.ReadinessProbe.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Lifecycle != nil {
+ l = m.Lifecycle.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.TerminationMessagePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ImagePullPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SecurityContext != nil {
+ l = m.SecurityContext.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 3
+ n += 3
+ n += 3
+ return n
+}
+
+func (m *ContainerImage) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Names) > 0 {
+ for _, s := range m.Names {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.SizeBytes))
+ return n
+}
+
+func (m *ContainerPort) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.HostPort))
+ n += 1 + sovGenerated(uint64(m.ContainerPort))
+ l = len(m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.HostIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ContainerState) Size() (n int) {
+ var l int
+ _ = l
+ if m.Waiting != nil {
+ l = m.Waiting.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Running != nil {
+ l = m.Running.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Terminated != nil {
+ l = m.Terminated.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ContainerStateRunning) Size() (n int) {
+ var l int
+ _ = l
+ l = m.StartedAt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ContainerStateTerminated) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ExitCode))
+ n += 1 + sovGenerated(uint64(m.Signal))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.StartedAt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.FinishedAt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ContainerID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ContainerStateWaiting) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ContainerStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.State.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTerminationState.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ n += 1 + sovGenerated(uint64(m.RestartCount))
+ l = len(m.Image)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ImageID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ContainerID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonEndpoint) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Port))
+ return n
+}
+
+func (m *DeleteOptions) Size() (n int) {
+ var l int
+ _ = l
+ if m.GracePeriodSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds))
+ }
+ if m.Preconditions != nil {
+ l = m.Preconditions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.OrphanDependents != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *DownwardAPIVolumeFile) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.FieldRef != nil {
+ l = m.FieldRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ResourceFieldRef != nil {
+ l = m.ResourceFieldRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DownwardAPIVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EmptyDirVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Medium)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EndpointAddress) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.IP)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TargetRef != nil {
+ l = m.TargetRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Hostname)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EndpointPort) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Port))
+ l = len(m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EndpointSubset) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Addresses) > 0 {
+ for _, e := range m.Addresses {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.NotReadyAddresses) > 0 {
+ for _, e := range m.NotReadyAddresses {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Endpoints) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Subsets) > 0 {
+ for _, e := range m.Subsets {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EndpointsList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EnvVar) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ValueFrom != nil {
+ l = m.ValueFrom.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *EnvVarSource) Size() (n int) {
+ var l int
+ _ = l
+ if m.FieldRef != nil {
+ l = m.FieldRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ResourceFieldRef != nil {
+ l = m.ResourceFieldRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ConfigMapKeyRef != nil {
+ l = m.ConfigMapKeyRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SecretKeyRef != nil {
+ l = m.SecretKeyRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Event) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.InvolvedObject.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Source.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.FirstTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Count))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EventList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EventSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Component)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Host)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ExecAction) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ExportOptions) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ return n
+}
+
+func (m *FCVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.TargetWWNs) > 0 {
+ for _, s := range m.TargetWWNs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Lun != nil {
+ n += 1 + sovGenerated(uint64(*m.Lun))
+ }
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *FlexVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if len(m.Options) > 0 {
+ for k, v := range m.Options {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *FlockerVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.DatasetName)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GCEPersistentDiskVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.PDName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Partition))
+ n += 2
+ return n
+}
+
+func (m *GitRepoVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Repository)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Revision)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Directory)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GlusterfsVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.EndpointsName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *HTTPGetAction) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Port.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Host)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Scheme)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.HTTPHeaders) > 0 {
+ for _, e := range m.HTTPHeaders {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HTTPHeader) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Handler) Size() (n int) {
+ var l int
+ _ = l
+ if m.Exec != nil {
+ l = m.Exec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.HTTPGet != nil {
+ l = m.HTTPGet.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.TCPSocket != nil {
+ l = m.TCPSocket.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *HostPathVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ISCSIVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.TargetPortal)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.IQN)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Lun))
+ l = len(m.ISCSIInterface)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *KeyToPath) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Lifecycle) Size() (n int) {
+ var l int
+ _ = l
+ if m.PostStart != nil {
+ l = m.PostStart.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PreStop != nil {
+ l = m.PreStop.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *LimitRange) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *LimitRangeItem) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Max) > 0 {
+ for k, v := range m.Max {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Min) > 0 {
+ for k, v := range m.Min {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Default) > 0 {
+ for k, v := range m.Default {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.DefaultRequest) > 0 {
+ for k, v := range m.DefaultRequest {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.MaxLimitRequestRatio) > 0 {
+ for k, v := range m.MaxLimitRequestRatio {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *LimitRangeList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LimitRangeSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Limits) > 0 {
+ for _, e := range m.Limits {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *List) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ListOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.LabelSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FieldSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ return n
+}
+
+func (m *LoadBalancerIngress) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.IP)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Hostname)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *LoadBalancerStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Ingress) > 0 {
+ for _, e := range m.Ingress {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LocalObjectReference) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NFSVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Server)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *Namespace) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NamespaceList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NamespaceSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Finalizers) > 0 {
+ for _, s := range m.Finalizers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NamespaceStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Node) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NodeAddress) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Address)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NodeAffinity) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastHeartbeatTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NodeDaemonEndpoints) Size() (n int) {
+ var l int
+ _ = l
+ l = m.KubeletEndpoint.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NodeList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeProxyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NodeSelector) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.NodeSelectorTerms) > 0 {
+ for _, e := range m.NodeSelectorTerms {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeSelectorRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeSelectorTerm) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchExpressions) > 0 {
+ for _, e := range m.MatchExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.PodCIDR)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ExternalID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ProviderID)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *NodeStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Capacity) > 0 {
+ for k, v := range m.Capacity {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Allocatable) > 0 {
+ for k, v := range m.Allocatable {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Addresses) > 0 {
+ for _, e := range m.Addresses {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.DaemonEndpoints.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.NodeInfo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Images) > 0 {
+ for _, e := range m.Images {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.VolumesInUse) > 0 {
+ for _, s := range m.VolumesInUse {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.VolumesAttached) > 0 {
+ for _, e := range m.VolumesAttached {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NodeSystemInfo) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.MachineID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SystemUUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.BootID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.KernelVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.OSImage)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ContainerRuntimeVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.KubeletVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.KubeProxyVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.OperatingSystem)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Architecture)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ObjectFieldSelector) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FieldPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ObjectMeta) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.GenerateName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SelfLink)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Generation))
+ l = m.CreationTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.DeletionTimestamp != nil {
+ l = m.DeletionTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.DeletionGracePeriodSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.OwnerReferences) > 0 {
+ for _, e := range m.OwnerReferences {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Finalizers) > 0 {
+ for _, s := range m.Finalizers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ObjectReference) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FieldPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *OwnerReference) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Controller != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *PersistentVolume) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PersistentVolumeClaim) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PersistentVolumeClaimList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PersistentVolumeClaimSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.AccessModes) > 0 {
+ for _, s := range m.AccessModes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Resources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.VolumeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PersistentVolumeClaimStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AccessModes) > 0 {
+ for _, s := range m.AccessModes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Capacity) > 0 {
+ for k, v := range m.Capacity {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *PersistentVolumeClaimVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ClaimName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *PersistentVolumeList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PersistentVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ if m.GCEPersistentDisk != nil {
+ l = m.GCEPersistentDisk.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.AWSElasticBlockStore != nil {
+ l = m.AWSElasticBlockStore.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.HostPath != nil {
+ l = m.HostPath.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Glusterfs != nil {
+ l = m.Glusterfs.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NFS != nil {
+ l = m.NFS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RBD != nil {
+ l = m.RBD.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ISCSI != nil {
+ l = m.ISCSI.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Cinder != nil {
+ l = m.Cinder.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CephFS != nil {
+ l = m.CephFS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.FC != nil {
+ l = m.FC.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Flocker != nil {
+ l = m.Flocker.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.FlexVolume != nil {
+ l = m.FlexVolume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.AzureFile != nil {
+ l = m.AzureFile.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.VsphereVolume != nil {
+ l = m.VsphereVolume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PersistentVolumeSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Capacity) > 0 {
+ for k, v := range m.Capacity {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = m.PersistentVolumeSource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AccessModes) > 0 {
+ for _, s := range m.AccessModes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ClaimRef != nil {
+ l = m.ClaimRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.PersistentVolumeReclaimPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PersistentVolumeStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Pod) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodAffinity) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodAffinityTerm) Size() (n int) {
+ var l int
+ _ = l
+ if m.LabelSelector != nil {
+ l = m.LabelSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Namespaces) > 0 {
+ for _, s := range m.Namespaces {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.TopologyKey)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodAntiAffinity) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodAttachOptions) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ n += 2
+ n += 2
+ l = len(m.Container)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastProbeTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodExecOptions) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ n += 2
+ n += 2
+ l = len(m.Container)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodLogOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Container)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ n += 2
+ if m.SinceSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+ }
+ if m.SinceTime != nil {
+ l = m.SinceTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if m.TailLines != nil {
+ n += 1 + sovGenerated(uint64(*m.TailLines))
+ }
+ if m.LimitBytes != nil {
+ n += 1 + sovGenerated(uint64(*m.LimitBytes))
+ }
+ return n
+}
+
+func (m *PodProxyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodSecurityContext) Size() (n int) {
+ var l int
+ _ = l
+ if m.SELinuxOptions != nil {
+ l = m.SELinuxOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RunAsUser != nil {
+ n += 1 + sovGenerated(uint64(*m.RunAsUser))
+ }
+ if m.RunAsNonRoot != nil {
+ n += 2
+ }
+ if len(m.SupplementalGroups) > 0 {
+ for _, e := range m.SupplementalGroups {
+ n += 1 + sovGenerated(uint64(e))
+ }
+ }
+ if m.FSGroup != nil {
+ n += 1 + sovGenerated(uint64(*m.FSGroup))
+ }
+ return n
+}
+
+func (m *PodSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Volumes) > 0 {
+ for _, e := range m.Volumes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Containers) > 0 {
+ for _, e := range m.Containers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.RestartPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TerminationGracePeriodSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+ }
+ l = len(m.DNSPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.NodeSelector) > 0 {
+ for k, v := range m.NodeSelector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.ServiceAccountName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DeprecatedServiceAccount)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ n += 2
+ n += 2
+ if m.SecurityContext != nil {
+ l = m.SecurityContext.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ImagePullSecrets) > 0 {
+ for _, e := range m.ImagePullSecrets {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.Hostname)
+ n += 2 + l + sovGenerated(uint64(l))
+ l = len(m.Subdomain)
+ n += 2 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.HostIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.StartTime != nil {
+ l = m.StartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ContainerStatuses) > 0 {
+ for _, e := range m.ContainerStatuses {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodStatusResult) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodTemplate) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodTemplateList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodTemplateSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Preconditions) Size() (n int) {
+ var l int
+ _ = l
+ if m.UID != nil {
+ l = len(*m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PreferredSchedulingTerm) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Weight))
+ l = m.Preference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Probe) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Handler.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.InitialDelaySeconds))
+ n += 1 + sovGenerated(uint64(m.TimeoutSeconds))
+ n += 1 + sovGenerated(uint64(m.PeriodSeconds))
+ n += 1 + sovGenerated(uint64(m.SuccessThreshold))
+ n += 1 + sovGenerated(uint64(m.FailureThreshold))
+ return n
+}
+
+func (m *RBDVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.CephMonitors) > 0 {
+ for _, s := range m.CephMonitors {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.RBDImage)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.RBDPool)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.RadosUser)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Keyring)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SecretRef != nil {
+ l = m.SecretRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ return n
+}
+
+func (m *RangeAllocation) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Range)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ReplicationController) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicationControllerList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReplicationControllerSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if len(m.Selector) > 0 {
+ for k, v := range m.Selector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.Template != nil {
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ReplicationControllerStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ return n
+}
+
+func (m *ResourceFieldSelector) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ContainerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Divisor.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceQuota) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceQuotaList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceQuotaSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Hard) > 0 {
+ for k, v := range m.Hard {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Scopes) > 0 {
+ for _, s := range m.Scopes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceQuotaStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Hard) > 0 {
+ for k, v := range m.Hard {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Used) > 0 {
+ for k, v := range m.Used {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ResourceRequirements) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Limits) > 0 {
+ for k, v := range m.Limits {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Requests) > 0 {
+ for k, v := range m.Requests {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *SELinuxOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.User)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Role)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Level)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Secret) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Data) > 0 {
+ for k, v := range m.Data {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.StringData) > 0 {
+ for k, v := range m.StringData {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *SecretKeySelector) Size() (n int) {
+ var l int
+ _ = l
+ l = m.LocalObjectReference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SecretList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SecretVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SecurityContext) Size() (n int) {
+ var l int
+ _ = l
+ if m.Capabilities != nil {
+ l = m.Capabilities.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Privileged != nil {
+ n += 2
+ }
+ if m.SELinuxOptions != nil {
+ l = m.SELinuxOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RunAsUser != nil {
+ n += 1 + sovGenerated(uint64(*m.RunAsUser))
+ }
+ if m.RunAsNonRoot != nil {
+ n += 2
+ }
+ if m.ReadOnlyRootFilesystem != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *SerializedReference) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Reference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Service) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ServiceAccount) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Secrets) > 0 {
+ for _, e := range m.Secrets {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ImagePullSecrets) > 0 {
+ for _, e := range m.ImagePullSecrets {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceAccountList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServicePort) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Port))
+ l = m.TargetPort.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.NodePort))
+ return n
+}
+
+func (m *ServiceProxyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ServiceSpec) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Selector) > 0 {
+ for k, v := range m.Selector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.ClusterIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ExternalIPs) > 0 {
+ for _, s := range m.ExternalIPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.DeprecatedPublicIPs) > 0 {
+ for _, s := range m.DeprecatedPublicIPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.SessionAffinity)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.LoadBalancerIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.LoadBalancerSourceRanges) > 0 {
+ for _, s := range m.LoadBalancerSourceRanges {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = m.LoadBalancer.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TCPSocketAction) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Port.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Taint) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Effect)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Toleration) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Effect)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Volume) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.VolumeSource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *VolumeMount) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = len(m.MountPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SubPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *VolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ if m.HostPath != nil {
+ l = m.HostPath.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EmptyDir != nil {
+ l = m.EmptyDir.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GCEPersistentDisk != nil {
+ l = m.GCEPersistentDisk.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.AWSElasticBlockStore != nil {
+ l = m.AWSElasticBlockStore.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GitRepo != nil {
+ l = m.GitRepo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Secret != nil {
+ l = m.Secret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NFS != nil {
+ l = m.NFS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ISCSI != nil {
+ l = m.ISCSI.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Glusterfs != nil {
+ l = m.Glusterfs.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PersistentVolumeClaim != nil {
+ l = m.PersistentVolumeClaim.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RBD != nil {
+ l = m.RBD.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.FlexVolume != nil {
+ l = m.FlexVolume.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Cinder != nil {
+ l = m.Cinder.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CephFS != nil {
+ l = m.CephFS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Flocker != nil {
+ l = m.Flocker.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.DownwardAPI != nil {
+ l = m.DownwardAPI.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.FC != nil {
+ l = m.FC.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.AzureFile != nil {
+ l = m.AzureFile.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.ConfigMap != nil {
+ l = m.ConfigMap.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ if m.VsphereVolume != nil {
+ l = m.VsphereVolume.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *VsphereVirtualDiskVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.VolumePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *WeightedPodAffinityTerm) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Weight))
+ l = m.PodAffinityTerm.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+ }
+ m.Partition = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Partition |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Affinity) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Affinity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeAffinity == nil {
+ m.NodeAffinity = &NodeAffinity{}
+ }
+ if err := m.NodeAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodAffinity == nil {
+ m.PodAffinity = &PodAffinity{}
+ }
+ if err := m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodAntiAffinity == nil {
+ m.PodAntiAffinity = &PodAntiAffinity{}
+ }
+ if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AttachedVolume) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = UniqueVolumeName(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DevicePath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AzureFileVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ShareName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Binding) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Binding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Capabilities) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Capabilities: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Add = append(m.Add, Capability(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Drop = append(m.Drop, Capability(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CephFSVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Monitors = append(m.Monitors, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretFile = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &LocalObjectReference{}
+ }
+ if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CinderVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ComponentCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ComponentConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Error = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ComponentStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, ComponentCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ComponentStatusList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ComponentStatus{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigMap) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
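+ // Each Data entry arrives as a nested map-entry message: read the key
+ // field's tag and string, then the value field's tag and string, and
+ // store the resulting pair in m.Data.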
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Data == nil {
+ m.Data = make(map[string]string)
+ }
+ m.Data[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigMapKeySelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigMapList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ConfigMap{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, KeyToPath{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
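+// Unmarshal decodes a Container from its protobuf wire representation: each
+// loop iteration reads a varint tag, splits it into field number and wire
+// type, and decodes the matching field; unknown fields are skipped via
+// skipGenerated.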
+func (m *Container) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Container: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Args = append(m.Args, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.WorkingDir = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, ContainerPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeMounts = append(m.VolumeMounts, VolumeMount{})
+ if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LivenessProbe == nil {
+ m.LivenessProbe = &Probe{}
+ }
+ if err := m.LivenessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ReadinessProbe == nil {
+ m.ReadinessProbe = &Probe{}
+ }
+ if err := m.ReadinessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Lifecycle == nil {
+ m.Lifecycle = &Lifecycle{}
+ }
+ if err := m.Lifecycle.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TerminationMessagePath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImagePullPolicy = PullPolicy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecurityContext == nil {
+ m.SecurityContext = &SecurityContext{}
+ }
+ if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stdin = bool(v != 0)
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.StdinOnce = bool(v != 0)
+ case 18:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TTY = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerImage) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Names = append(m.Names, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
+ }
+ m.SizeBytes = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.SizeBytes |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerPort) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
+ }
+ m.HostPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.HostPort |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
+ }
+ m.ContainerPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ContainerPort |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Protocol = Protocol(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HostIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerState) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Waiting == nil {
+ m.Waiting = &ContainerStateWaiting{}
+ }
+ if err := m.Waiting.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Running == nil {
+ m.Running = &ContainerStateRunning{}
+ }
+ if err := m.Running.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Terminated == nil {
+ m.Terminated = &ContainerStateTerminated{}
+ }
+ if err := m.Terminated.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerStateRunning) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerStateTerminated) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType)
+ }
+ m.ExitCode = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ExitCode |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType)
+ }
+ m.Signal = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Signal |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.FinishedAt.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ContainerStateWaiting) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
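
Aside (not part of the vendored generated.pb.go above): the generated methods such as ContainerStateWaiting.Unmarshal handle every string and nested-message field the same way — read a varint length for the wire-type-2 field, then slice that many bytes out of data. The standalone sketch below, with the hypothetical helper name readLengthDelimited and simplified overflow handling, illustrates that pattern on its own.

package main

import (
	"fmt"
	"io"
)

// readLengthDelimited returns the payload bytes of a wire-type-2 field that
// starts at data[idx] (the field's tag byte has already been consumed), plus
// the index of the first byte after the payload.
func readLengthDelimited(data []byte, idx int) ([]byte, int, error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if idx >= len(data) {
			return nil, idx, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		msglen |= (int(b) & 0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the length varint
			break
		}
	}
	post := idx + msglen
	if msglen < 0 || post > len(data) {
		return nil, idx, io.ErrUnexpectedEOF
	}
	return data[idx:post], post, nil
}

func main() {
	// A 5-byte payload "hello" preceded by its varint-encoded length.
	payload, next, err := readLengthDelimited([]byte{0x05, 'h', 'e', 'l', 'l', 'o'}, 0)
	fmt.Println(string(payload), next, err) // hello 6 <nil>
}

In the generated code the resulting slice is either converted to a string (m.Reason, m.Message) or passed to the nested type's own Unmarshal.
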
+func (m *ContainerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTerminationState.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Ready = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType)
+ }
+ m.RestartCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.RestartCount |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImageID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonEndpoint) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ m.Port = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Port |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
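
Aside (again, not part of the vendored file): numeric fields such as DaemonEndpoint.Port above are decoded with an inlined base-128 varint loop. A minimal self-contained sketch of that loop follows; the helper name readVarint is hypothetical and does not exist in this package.

package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("varint overflows a 64-bit integer")

// readVarint decodes one protobuf varint starting at data[idx] and returns
// the value together with the index of the first byte after it.
func readVarint(data []byte, idx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, idx, errIntOverflow
		}
		if idx >= len(data) {
			return 0, idx, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			break
		}
	}
	return v, idx, nil
}

func main() {
	// 300 encodes as the two bytes 0xAC 0x02 in varint form.
	v, next, err := readVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}

The same loop, specialized to the target type (int32, int64, bool-as-int), is what each case of the generated switch statements repeats.
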
+func (m *DeleteOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.GracePeriodSeconds = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Preconditions == nil {
+ m.Preconditions = &Preconditions{}
+ }
+ if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.OrphanDependents = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FieldRef == nil {
+ m.FieldRef = &ObjectFieldSelector{}
+ }
+ if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ResourceFieldRef == nil {
+ m.ResourceFieldRef = &ResourceFieldSelector{}
+ }
+ if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DownwardAPIVolumeFile{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Medium = StorageMedium(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointAddress) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TargetRef == nil {
+ m.TargetRef = &ObjectReference{}
+ }
+ if err := m.TargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointPort) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ m.Port = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Port |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Protocol = Protocol(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointSubset) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, EndpointAddress{})
+ if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{})
+ if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, EndpointPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Endpoints) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Endpoints: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subsets = append(m.Subsets, EndpointSubset{})
+ if err := m.Subsets[len(m.Subsets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointsList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Endpoints{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EnvVar) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EnvVar: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ValueFrom == nil {
+ m.ValueFrom = &EnvVarSource{}
+ }
+ if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EnvVarSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FieldRef == nil {
+ m.FieldRef = &ObjectFieldSelector{}
+ }
+ if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ResourceFieldRef == nil {
+ m.ResourceFieldRef = &ResourceFieldSelector{}
+ }
+ if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConfigMapKeyRef == nil {
+ m.ConfigMapKeyRef = &ConfigMapKeySelector{}
+ }
+ if err := m.ConfigMapKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretKeyRef == nil {
+ m.SecretKeyRef = &SecretKeySelector{}
+ }
+ if err := m.SecretKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Event) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Event: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.InvolvedObject.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.FirstTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Count |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EventList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EventList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Event{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EventSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EventSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Component = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Host = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExecAction) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExecAction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExportOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Export = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Exact = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *FCVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TargetWWNs = append(m.TargetWWNs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Lun = &v
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *FlexVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &LocalObjectReference{}
+ }
+ if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
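+ // A map entry is a small embedded message: a tagged, length-prefixed key
+ // followed by a tagged, length-prefixed value.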
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Options == nil {
+ m.Options = make(map[string]string)
+ }
+ m.Options[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *FlockerVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DatasetName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PDName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+ }
+ m.Partition = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Partition |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitRepoVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Repository = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Revision = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Directory = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EndpointsName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HTTPGetAction) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Host = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scheme = URIScheme(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{})
+ if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HTTPHeader) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Handler) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Handler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
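+ // Nested message fields are allocated on first use, then decoded by their own Unmarshal.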
+ if m.Exec == nil {
+ m.Exec = &ExecAction{}
+ }
+ if err := m.Exec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.HTTPGet == nil {
+ m.HTTPGet = &HTTPGetAction{}
+ }
+ if err := m.HTTPGet.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TCPSocket == nil {
+ m.TCPSocket = &TCPSocketAction{}
+ }
+ if err := m.TCPSocket.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HostPathVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ISCSIVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TargetPortal = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IQN = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+ }
+ m.Lun = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Lun |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ISCSIInterface = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KeyToPath) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Lifecycle) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PostStart == nil {
+ m.PostStart = &Handler{}
+ }
+ if err := m.PostStart.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PreStop == nil {
+ m.PreStop = &Handler{}
+ }
+ if err := m.PreStop.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LimitRange) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LimitRange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LimitRangeItem) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = LimitType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
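+ // The map value is itself a message (resource.Quantity) and is decoded recursively.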
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Max == nil {
+ m.Max = make(ResourceList)
+ }
+ m.Max[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Min == nil {
+ m.Min = make(ResourceList)
+ }
+ m.Min[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Default == nil {
+ m.Default = make(ResourceList)
+ }
+ m.Default[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.DefaultRequest == nil {
+ m.DefaultRequest = make(ResourceList)
+ }
+ m.DefaultRequest[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.MaxLimitRequestRatio == nil {
+ m.MaxLimitRequestRatio = make(ResourceList)
+ }
+ m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LimitRangeList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, LimitRange{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LimitRangeSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Limits = append(m.Limits, LimitRangeItem{})
+ if err := m.Limits[len(m.Limits)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *List) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: List: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, k8s_io_kubernetes_pkg_runtime.RawExtension{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LabelSelector = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldSelector = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Watch = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LoadBalancerIngress) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LoadBalancerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ingress = append(m.Ingress, LoadBalancerIngress{})
+ if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LocalObjectReference) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NFSVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Server = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Namespace) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Namespace: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamespaceList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Namespace{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamespaceSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Finalizers = append(m.Finalizers, FinalizerName(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamespaceStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = NamespacePhase(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Node) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Node: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeAddress) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = NodeAddressType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Address = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeAffinity) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{}
+ }
+ if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{})
+ if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = NodeConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastHeartbeatTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.KubeletEndpoint.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Node{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeProxyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{})
+ if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeSelectorRequirement) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = NodeSelectorOperator(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeSelectorTerm) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{})
+ if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodCIDR = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExternalID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProviderID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Unschedulable = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NodeStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Capacity == nil {
+ m.Capacity = make(ResourceList)
+ }
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Allocatable == nil {
+ m.Allocatable = make(ResourceList)
+ }
+ m.Allocatable[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = NodePhase(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, NodeCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, NodeAddress{})
+ if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DaemonEndpoints.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.NodeInfo.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Images = append(m.Images, ContainerImage{})
+ if err := m.Images[len(m.Images)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{})
+ if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
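
Every Unmarshal method in this generated file follows the same shape: decode a base-128 varint to get the field tag, split it into a field number (tag >> 3) and a wire type (tag & 0x7), then read a fixed-size value or a length-delimited payload, or skip the field. Below is a minimal standalone sketch of that varint-and-tag step; it is not taken from the vendored file, and names such as decodeVarint are illustrative only.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a protobuf base-128 varint starting at data[i], the same
// loop the generated Unmarshal methods inline: each byte contributes its low
// seven bits and the high bit marks continuation.
func decodeVarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// Tag 0x0A is field number 1 with wire type 2 (length-delimited), the
	// encoding a string field such as ObjectMeta.Name would use.
	data := []byte{0x0A, 0x03, 'f', 'o', 'o'}

	wire, i, err := decodeVarint(data, 0)
	if err != nil {
		panic(err)
	}
	fieldNum := int32(wire >> 3) // 1
	wireType := int(wire & 0x7)  // 2

	strLen, i, err := decodeVarint(data, i)
	if err != nil {
		panic(err)
	}
	fmt.Println(fieldNum, wireType, string(data[i:i+int(strLen)])) // 1 2 foo
}

Length-delimited fields (wire type 2) carry a byte count first, which is why the generated code computes postIndex := iNdEx + msglen and bounds-checks it against the input length before slicing.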
+func (m *NodeSystemInfo) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MachineID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SystemUUID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BootID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BootID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KernelVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OSImage = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerRuntimeVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KubeletVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KubeProxyVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OperatingSystem = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Architecture = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ObjectFieldSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldPath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ObjectMeta) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GenerateName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SelfLink = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ m.Generation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Generation |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DeletionTimestamp == nil {
+ m.DeletionTimestamp = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.DeletionGracePeriodSeconds = &v
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OwnerReferences = append(m.OwnerReferences, OwnerReference{})
+ if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
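
The map-typed fields above (Capacity, Allocatable, Labels, Annotations) are encoded as repeated length-delimited entries, each holding a tagged key (field 1) and a tagged value (field 2); that is what the keykey/valuekey and stringLenmapkey/stringLenmapvalue loops are unpacking. The following is a small self-contained sketch of decoding one such map<string,string> entry, with an illustrative helper name and error handling deliberately elided, so it is an approximation rather than the vendored logic.

package main

import "fmt"

// readUvarint is the same base-128 decode used throughout the generated file,
// returning the value and the index just past it. Error handling is omitted
// to keep the sketch short.
func readUvarint(data []byte, i int) (uint64, int) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		b := data[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, i
		}
	}
}

func main() {
	// One map<string,string> entry as the Labels/Annotations cases see it:
	// an outer length-delimited message holding field 1 (key) and field 2
	// (value), each itself a length-delimited string.
	entry := []byte{
		0x0A, 0x03, 'a', 'p', 'p', // field 1 (key), length 3, "app"
		0x12, 0x03, 'w', 'e', 'b', // field 2 (value), length 3, "web"
	}
	labels := make(map[string]string)

	i := 0
	_, i = readUvarint(entry, i) // key tag (0x0A)
	klen, i := readUvarint(entry, i)
	key := string(entry[i : i+int(klen)])
	i += int(klen)

	_, i = readUvarint(entry, i) // value tag (0x12)
	vlen, i := readUvarint(entry, i)
	val := string(entry[i : i+int(vlen)])

	labels[key] = val
	fmt.Println(labels) // map[app:web]
}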
+func (m *ObjectReference) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldPath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OwnerReference) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Controller = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolume) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeClaim) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PersistentVolumeClaim{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Capacity == nil {
+ m.Capacity = make(ResourceList)
+ }
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClaimName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PersistentVolume{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GCEPersistentDisk == nil {
+ m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{}
+ }
+ if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AWSElasticBlockStore == nil {
+ m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{}
+ }
+ if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.HostPath == nil {
+ m.HostPath = &HostPathVolumeSource{}
+ }
+ if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Glusterfs == nil {
+ m.Glusterfs = &GlusterfsVolumeSource{}
+ }
+ if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NFS == nil {
+ m.NFS = &NFSVolumeSource{}
+ }
+ if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RBD == nil {
+ m.RBD = &RBDVolumeSource{}
+ }
+ if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ISCSI == nil {
+ m.ISCSI = &ISCSIVolumeSource{}
+ }
+ if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Cinder == nil {
+ m.Cinder = &CinderVolumeSource{}
+ }
+ if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CephFS == nil {
+ m.CephFS = &CephFSVolumeSource{}
+ }
+ if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FC == nil {
+ m.FC = &FCVolumeSource{}
+ }
+ if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Flocker == nil {
+ m.Flocker = &FlockerVolumeSource{}
+ }
+ if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FlexVolume == nil {
+ m.FlexVolume = &FlexVolumeSource{}
+ }
+ if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AzureFile == nil {
+ m.AzureFile = &AzureFileVolumeSource{}
+ }
+ if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VsphereVolume == nil {
+ m.VsphereVolume = &VsphereVirtualDiskVolumeSource{}
+ }
+ if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Capacity == nil {
+ m.Capacity = make(ResourceList)
+ }
+ m.Capacity[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClaimRef == nil {
+ m.ClaimRef = &ObjectReference{}
+ }
+ if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PersistentVolumeStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = PersistentVolumePhase(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Pod) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Pod: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodAffinity) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
+ if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
+ if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodAffinityTerm) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LabelSelector == nil {
+ m.LabelSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{}
+ }
+ if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespaces = append(m.Namespaces, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TopologyKey = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodAntiAffinity) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
+ if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
+ if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodAttachOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stdin = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stdout = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stderr = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TTY = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Container = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = PodConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodExecOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stdin = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stdout = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stderr = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TTY = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Container = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Pod{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodLogOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Container = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Follow = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Previous = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SinceSeconds = &v
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SinceTime == nil {
+ m.SinceTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Timestamps = bool(v != 0)
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TailLines = &v
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LimitBytes = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodProxyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodSecurityContext) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SELinuxOptions == nil {
+ m.SELinuxOptions = &SELinuxOptions{}
+ }
+ if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RunAsUser = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.RunAsNonRoot = &b
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SupplementalGroups = append(m.SupplementalGroups, v)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.FSGroup = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Volumes = append(m.Volumes, Volume{})
+ if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Containers = append(m.Containers, Container{})
+ if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RestartPolicy = RestartPolicy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminationGracePeriodSeconds = &v
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveDeadlineSeconds = &v
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DNSPolicy = DNSPolicy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.NodeSelector == nil {
+ m.NodeSelector = make(map[string]string)
+ }
+ m.NodeSelector[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccountName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DeprecatedServiceAccount = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.HostNetwork = bool(v != 0)
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.HostPID = bool(v != 0)
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.HostIPC = bool(v != 0)
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecurityContext == nil {
+ m.SecurityContext = &PodSecurityContext{}
+ }
+ if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{})
+ if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subdomain = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = PodPhase(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, PodCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HostIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StartTime == nil {
+ m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{})
+ if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodStatusResult) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodTemplate) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodTemplateList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PodTemplate{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodTemplateSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Preconditions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Preconditions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex])
+ m.UID = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
+ }
+ m.Weight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Weight |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Preference.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Probe) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Probe: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Handler.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType)
+ }
+ m.InitialDelaySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ m.TimeoutSeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.TimeoutSeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType)
+ }
+ m.PeriodSeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.PeriodSeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType)
+ }
+ m.SuccessThreshold = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.SuccessThreshold |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType)
+ }
+ m.FailureThreshold = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.FailureThreshold |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RBDVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CephMonitors = append(m.CephMonitors, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RBDImage = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RBDPool = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RadosUser = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyring = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretRef == nil {
+ m.SecretRef = &LocalObjectReference{}
+ }
+ if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RangeAllocation) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Range = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicationController) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicationControllerList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ReplicationController{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicationControllerSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Selector == nil {
+ m.Selector = make(map[string]string)
+ }
+ m.Selector[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Template == nil {
+ m.Template = &PodTemplateSpec{}
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicationControllerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType)
+ }
+ m.FullyLabeledReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceFieldSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Divisor.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceQuota) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceQuotaList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ResourceQuota{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceQuotaSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Hard == nil {
+ m.Hard = make(ResourceList)
+ }
+ m.Hard[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scopes = append(m.Scopes, ResourceQuotaScope(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceQuotaStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Hard == nil {
+ m.Hard = make(ResourceList)
+ }
+ m.Hard[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Used == nil {
+ m.Used = make(ResourceList)
+ }
+ m.Used[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceRequirements) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Limits == nil {
+ m.Limits = make(ResourceList)
+ }
+ m.Limits[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := ResourceName(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapmsglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{}
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ if m.Requests == nil {
+ m.Requests = make(ResourceList)
+ }
+ m.Requests[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SELinuxOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Level = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Secret) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Secret: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var mapbyteLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ mapbyteLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intMapbyteLen := int(mapbyteLen)
+ if intMapbyteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postbytesIndex := iNdEx + intMapbyteLen
+ if postbytesIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := make([]byte, mapbyteLen)
+ copy(mapvalue, data[iNdEx:postbytesIndex])
+ iNdEx = postbytesIndex
+ if m.Data == nil {
+ m.Data = make(map[string][]byte)
+ }
+ m.Data[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = SecretType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.StringData == nil {
+ m.StringData = make(map[string]string)
+ }
+ m.StringData[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretKeySelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Secret{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, KeyToPath{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecurityContext) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Capabilities == nil {
+ m.Capabilities = &Capabilities{}
+ }
+ if err := m.Capabilities.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Privileged = &b
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SELinuxOptions == nil {
+ m.SELinuxOptions = &SELinuxOptions{}
+ }
+ if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RunAsUser = &v
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.RunAsNonRoot = &b
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ReadOnlyRootFilesystem = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SerializedReference) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Reference.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Service) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Service: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceAccount) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secrets = append(m.Secrets, ObjectReference{})
+ if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{})
+ if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceAccountList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ServiceAccount{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Service{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServicePort) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServicePort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Protocol = Protocol(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ m.Port = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Port |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TargetPort.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType)
+ }
+ m.NodePort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.NodePort |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceProxyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, ServicePort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Selector == nil {
+ m.Selector = make(map[string]string)
+ }
+ m.Selector[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ServiceType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LoadBalancerIP = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TCPSocketAction) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Taint) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Taint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Effect = TaintEffect(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Toleration) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Toleration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = TolerationOperator(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Effect = TaintEffect(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Volume) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Volume: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.VolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VolumeMount) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnly = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MountPath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SubPath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.HostPath == nil {
+ m.HostPath = &HostPathVolumeSource{}
+ }
+ if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.EmptyDir == nil {
+ m.EmptyDir = &EmptyDirVolumeSource{}
+ }
+ if err := m.EmptyDir.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GCEPersistentDisk == nil {
+ m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{}
+ }
+ if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AWSElasticBlockStore == nil {
+ m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{}
+ }
+ if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GitRepo == nil {
+ m.GitRepo = &GitRepoVolumeSource{}
+ }
+ if err := m.GitRepo.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Secret == nil {
+ m.Secret = &SecretVolumeSource{}
+ }
+ if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NFS == nil {
+ m.NFS = &NFSVolumeSource{}
+ }
+ if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ISCSI == nil {
+ m.ISCSI = &ISCSIVolumeSource{}
+ }
+ if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Glusterfs == nil {
+ m.Glusterfs = &GlusterfsVolumeSource{}
+ }
+ if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PersistentVolumeClaim == nil {
+ m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{}
+ }
+ if err := m.PersistentVolumeClaim.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RBD == nil {
+ m.RBD = &RBDVolumeSource{}
+ }
+ if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FlexVolume == nil {
+ m.FlexVolume = &FlexVolumeSource{}
+ }
+ if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Cinder == nil {
+ m.Cinder = &CinderVolumeSource{}
+ }
+ if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CephFS == nil {
+ m.CephFS = &CephFSVolumeSource{}
+ }
+ if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Flocker == nil {
+ m.Flocker = &FlockerVolumeSource{}
+ }
+ if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DownwardAPI == nil {
+ m.DownwardAPI = &DownwardAPIVolumeSource{}
+ }
+ if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 17:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FC == nil {
+ m.FC = &FCVolumeSource{}
+ }
+ if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 18:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AzureFile == nil {
+ m.AzureFile = &AzureFileVolumeSource{}
+ }
+ if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 19:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConfigMap == nil {
+ m.ConfigMap = &ConfigMapVolumeSource{}
+ }
+ if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 20:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.VsphereVolume == nil {
+ m.VsphereVolume = &VsphereVirtualDiskVolumeSource{}
+ }
+ if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumePath = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
+ }
+ m.Weight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Weight |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto
new file mode 100644
index 0000000..060b6ca
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto
@@ -0,0 +1,2935 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.api.v1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+message AWSElasticBlockStoreVolumeSource {
+ // Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ optional string volumeID = 1;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ optional string fsType = 2;
+
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ optional int32 partition = 3;
+
+ // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
+ // If omitted, the default is "false".
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ optional bool readOnly = 4;
+}
+
+// Affinity is a group of affinity scheduling rules.
+message Affinity {
+ // Describes node affinity scheduling rules for the pod.
+ optional NodeAffinity nodeAffinity = 1;
+
+ // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ optional PodAffinity podAffinity = 2;
+
+ // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ optional PodAntiAffinity podAntiAffinity = 3;
+}
+
+// AttachedVolume describes a volume attached to a node
+message AttachedVolume {
+ // Name of the attached volume
+ optional string name = 1;
+
+  // DevicePath represents the device path where the volume should be available
+ optional string devicePath = 2;
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+message AzureFileVolumeSource {
+  // the name of the secret that contains Azure Storage Account Name and Key
+ optional string secretName = 1;
+
+ // Share Name
+ optional string shareName = 2;
+
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ optional bool readOnly = 3;
+}
+
+// Binding ties one object to another.
+// For example, a pod is bound to a node by a scheduler.
+message Binding {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // The target object that you want to bind to the standard object.
+ optional ObjectReference target = 2;
+}
+
+// Adds and removes POSIX capabilities from running containers.
+message Capabilities {
+ // Added capabilities
+ repeated string add = 1;
+
+ // Removed capabilities
+ repeated string drop = 2;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSVolumeSource {
+ // Required: Monitors is a collection of Ceph monitors
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ repeated string monitors = 1;
+
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ optional string path = 2;
+
+ // Optional: User is the rados user name, default is admin
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ optional string user = 3;
+
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ optional string secretFile = 4;
+
+ // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ optional LocalObjectReference secretRef = 5;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ optional bool readOnly = 6;
+}
+
+// Represents a Cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+message CinderVolumeSource {
+ // volume id used to identify the volume in cinder
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ optional string volumeID = 1;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ optional string fsType = 2;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ optional bool readOnly = 3;
+}
+
+// Information about the condition of a component.
+message ComponentCondition {
+ // Type of condition for a component.
+ // Valid value: "Healthy"
+ optional string type = 1;
+
+ // Status of the condition for a component.
+ // Valid values for "Healthy": "True", "False", or "Unknown".
+ optional string status = 2;
+
+ // Message about the condition for a component.
+ // For example, information about a health check.
+ optional string message = 3;
+
+ // Condition error code for a component.
+ // For example, a health check error code.
+ optional string error = 4;
+}
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+message ComponentStatus {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // List of component conditions observed
+ repeated ComponentCondition conditions = 2;
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+message ComponentStatusList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of ComponentStatus objects.
+ repeated ComponentStatus items = 2;
+}
+
+// ConfigMap holds configuration data for pods to consume.
+message ConfigMap {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Data contains the configuration data.
+ // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.
+ map<string, string> data = 2;
+}
+
+// Selects a key from a ConfigMap.
+message ConfigMapKeySelector {
+ // The ConfigMap to select from.
+ optional LocalObjectReference localObjectReference = 1;
+
+ // The key to select.
+ optional string key = 2;
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+message ConfigMapList {
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of ConfigMaps.
+ repeated ConfigMap items = 2;
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+message ConfigMapVolumeSource {
+ optional LocalObjectReference localObjectReference = 1;
+
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error. Paths must be relative and may not contain
+ // the '..' path or start with '..'.
+ repeated KeyToPath items = 2;
+}
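+
+// Illustrative only (the ConfigMap name and key below are hypothetical, not part
+// of this schema): a source with
+//   localObjectReference: {"name": "app-config"},
+//   items: [{"key": "settings.json", "path": "conf/settings.json"}]
+// surfaces exactly one file, <volume mount>/conf/settings.json, whose content is
+// the value stored under the "settings.json" key; all other keys are omitted.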
+
+// A single application container that you want to run within a pod.
+message Container {
+ // Name of the container specified as a DNS_LABEL.
+ // Each container in a pod must have a unique name (DNS_LABEL).
+ // Cannot be updated.
+ optional string name = 1;
+
+ // Docker image name.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md
+ optional string image = 2;
+
+ // Entrypoint array. Not executed within a shell.
+ // The docker image's ENTRYPOINT is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands
+ repeated string command = 3;
+
+ // Arguments to the entrypoint.
+ // The docker image's CMD is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands
+ repeated string args = 4;
+
+ // Container's working directory.
+ // If not specified, the container runtime's default will be used, which
+ // might be configured in the container image.
+ // Cannot be updated.
+ optional string workingDir = 5;
+
+ // List of ports to expose from the container. Exposing a port here gives
+ // the system additional information about the network connections a
+ // container uses, but is primarily informational. Not specifying a port here
+ // DOES NOT prevent that port from being exposed. Any port which is
+ // listening on the default "0.0.0.0" address inside a container will be
+ // accessible from the network.
+ // Cannot be updated.
+ repeated ContainerPort ports = 6;
+
+ // List of environment variables to set in the container.
+ // Cannot be updated.
+ repeated EnvVar env = 7;
+
+ // Compute Resources required by this container.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
+ optional ResourceRequirements resources = 8;
+
+ // Pod volumes to mount into the container's filesystem.
+ // Cannot be updated.
+ repeated VolumeMount volumeMounts = 9;
+
+ // Periodic probe of container liveness.
+ // Container will be restarted if the probe fails.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ optional Probe livenessProbe = 10;
+
+ // Periodic probe of container service readiness.
+ // Container will be removed from service endpoints if the probe fails.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ optional Probe readinessProbe = 11;
+
+ // Actions that the management system should take in response to container lifecycle events.
+ // Cannot be updated.
+ optional Lifecycle lifecycle = 12;
+
+ // Optional: Path at which the file to which the container's termination message
+ // will be written is mounted into the container's filesystem.
+ // Message written is intended to be brief final status, such as an assertion failure message.
+ // Defaults to /dev/termination-log.
+ // Cannot be updated.
+ optional string terminationMessagePath = 13;
+
+ // Image pull policy.
+ // One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images
+ optional string imagePullPolicy = 14;
+
+ // Security options the pod should run with.
+ // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md
+ optional SecurityContext securityContext = 15;
+
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this
+ // is not set, reads from stdin in the container will always result in EOF.
+ // Default is false.
+ optional bool stdin = 16;
+
+ // Whether the container runtime should close the stdin channel after it has been opened by
+ // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ // at which time stdin is closed and remains closed until the container is restarted. If this
+ // flag is false, a container process that reads from stdin will never receive an EOF.
+ // Default is false.
+ optional bool stdinOnce = 17;
+
+ // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ // Default is false.
+ optional bool tty = 18;
+}
+
+// Describe a container image
+message ContainerImage {
+ // Names by which this image is known.
+ // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+ repeated string names = 1;
+
+ // The size of the image in bytes.
+ optional int64 sizeBytes = 2;
+}
+
+// ContainerPort represents a network port in a single container.
+message ContainerPort {
+ // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ // named port in a pod must have a unique name. Name for the port that can be
+ // referred to by services.
+ optional string name = 1;
+
+ // Number of port to expose on the host.
+ // If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // Most containers do not need this.
+ optional int32 hostPort = 2;
+
+ // Number of port to expose on the pod's IP address.
+ // This must be a valid port number, 0 < x < 65536.
+ optional int32 containerPort = 3;
+
+ // Protocol for port. Must be UDP or TCP.
+ // Defaults to "TCP".
+ optional string protocol = 4;
+
+ // What host IP to bind the external port to.
+ optional string hostIP = 5;
+}
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+message ContainerState {
+ // Details about a waiting container
+ optional ContainerStateWaiting waiting = 1;
+
+ // Details about a running container
+ optional ContainerStateRunning running = 2;
+
+ // Details about a terminated container
+ optional ContainerStateTerminated terminated = 3;
+}
+
+// ContainerStateRunning is a running state of a container.
+message ContainerStateRunning {
+ // Time at which the container was last (re-)started
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 1;
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+message ContainerStateTerminated {
+ // Exit status from the last termination of the container
+ optional int32 exitCode = 1;
+
+ // Signal from the last termination of the container
+ optional int32 signal = 2;
+
+ // (brief) reason from the last termination of the container
+ optional string reason = 3;
+
+ // Message regarding the last termination of the container
+ optional string message = 4;
+
+ // Time at which previous execution of the container started
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 5;
+
+ // Time at which the container last terminated
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time finishedAt = 6;
+
+ // Container's ID in the format 'docker://<container_id>'
+ optional string containerID = 7;
+}
+
+// ContainerStateWaiting is a waiting state of a container.
+message ContainerStateWaiting {
+ // (brief) reason the container is not yet running.
+ optional string reason = 1;
+
+ // Message regarding why the container is not yet running.
+ optional string message = 2;
+}
+
+// ContainerStatus contains details for the current status of this container.
+message ContainerStatus {
+ // This must be a DNS_LABEL. Each container in a pod must have a unique name.
+ // Cannot be updated.
+ optional string name = 1;
+
+ // Details about the container's current condition.
+ optional ContainerState state = 2;
+
+ // Details about the container's last termination condition.
+ optional ContainerState lastState = 3;
+
+ // Specifies whether the container has passed its readiness probe.
+ optional bool ready = 4;
+
+ // The number of times the container has been restarted, currently based on
+ // the number of dead containers that have not yet been removed.
+ // Note that this is calculated from dead containers. But those containers are subject to
+ // garbage collection. This value will get capped at 5 by GC.
+ optional int32 restartCount = 5;
+
+ // The image the container is running.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md
+ // TODO(dchen1107): Which image the container is running with?
+ optional string image = 6;
+
+ // ImageID of the container's image.
+ optional string imageID = 7;
+
+ // Container's ID in the format 'docker://<container_id>'.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information
+ optional string containerID = 8;
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+message DaemonEndpoint {
+ // Port number of the given endpoint.
+ optional int32 Port = 1;
+}
+
+// DeleteOptions may be provided when deleting an API object
+message DeleteOptions {
+ // The duration in seconds before the object should be deleted. The value must be a non-negative
+ // integer; zero indicates delete immediately. If this value is nil, the default grace period for
+ // the specified type will be used.
+ optional int64 gracePeriodSeconds = 1;
+
+ // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+ // returned.
+ optional Preconditions preconditions = 2;
+
+ // Should the dependent objects be orphaned. If true/false, the "orphan"
+ // finalizer will be added to/removed from the object's finalizers list.
+ optional bool orphanDependents = 3;
+}
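+
+// Illustrative only (the values are hypothetical): {"gracePeriodSeconds": 0}
+// requests immediate deletion, {"gracePeriodSeconds": 30} asks the system to wait
+// up to 30 seconds before removal, and omitting the field falls back to the
+// per-type default grace period; {"orphanDependents": true} additionally adds the
+// "orphan" finalizer so dependent objects are left in place.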
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+message DownwardAPIVolumeFile {
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ optional string path = 1;
+
+ // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
+ optional ObjectFieldSelector fieldRef = 2;
+
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ optional ResourceFieldSelector resourceFieldRef = 3;
+}
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+message DownwardAPIVolumeSource {
+ // Items is a list of downward API volume files.
+ repeated DownwardAPIVolumeFile items = 1;
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+message EmptyDirVolumeSource {
+ // What type of storage medium should back this directory.
+ // The default is "" which means to use the node's default medium.
+ // Must be an empty string (default) or Memory.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir
+ optional string medium = 1;
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+message EndpointAddress {
+ // The IP of this endpoint.
+ // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
+ // or link-local multicast (224.0.0.0/24).
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, See #4447.
+ optional string ip = 1;
+
+ // The Hostname of this endpoint
+ optional string hostname = 3;
+
+ // Reference to object providing the endpoint.
+ optional ObjectReference targetRef = 2;
+}
+
+// EndpointPort is a tuple that describes a single port.
+message EndpointPort {
+ // The name of this port (corresponds to ServicePort.Name).
+ // Must be a DNS_LABEL.
+ // Optional only if one port is defined.
+ optional string name = 1;
+
+ // The port number of the endpoint.
+ optional int32 port = 2;
+
+ // The IP protocol for this port.
+ // Must be UDP or TCP.
+ // Default is TCP.
+ optional string protocol = 3;
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// }
+// The resulting set of endpoints can be viewed as:
+// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+message EndpointSubset {
+ // IP addresses which offer the related ports that are marked as ready. These endpoints
+ // should be considered safe for load balancers and clients to utilize.
+ repeated EndpointAddress addresses = 1;
+
+ // IP addresses which offer the related ports but are not currently marked as ready
+ // because they have not yet finished starting, have recently failed a readiness check,
+ // or have recently failed a liveness check.
+ repeated EndpointAddress notReadyAddresses = 2;
+
+ // Port numbers available on the related IP addresses.
+ repeated EndpointPort ports = 3;
+}
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+// Name: "mysvc",
+// Subsets: [
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// },
+// {
+// Addresses: [{"ip": "10.10.3.3"}],
+// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+// },
+// ]
+message Endpoints {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // The set of all endpoints is the union of all subsets. Addresses are placed into
+ // subsets according to the IPs they share. A single address with multiple ports,
+ // some of which are ready and some of which are not (because they come from
+ // different containers) will result in the address being displayed in different
+ // subsets for the different ports. No address will appear in both Addresses and
+ // NotReadyAddresses in the same subset.
+ // Sets of addresses and ports that comprise a service.
+ repeated EndpointSubset subsets = 2;
+}
+
+// EndpointsList is a list of endpoints.
+message EndpointsList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of endpoints.
+ repeated Endpoints items = 2;
+}
+
+// EnvVar represents an environment variable present in a Container.
+message EnvVar {
+ // Name of the environment variable. Must be a C_IDENTIFIER.
+ optional string name = 1;
+
+ // Variable references $(VAR_NAME) are expanded
+ // using the previous defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. The $(VAR_NAME)
+ // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
+ // references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // Defaults to "".
+ optional string value = 2;
+
+ // Source for the environment variable's value. Cannot be used if value is not empty.
+ optional EnvVarSource valueFrom = 3;
+}
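+
+// Illustrative only (variable names are hypothetical): given the variables
+//   {name: "HOST", value: "db"} and {name: "URL", value: "http://$(HOST):5432"},
+// URL expands to "http://db:5432". A reference to an undefined variable is left
+// unchanged in the string, and an escaped reference written as $$(HOST) is never
+// expanded.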
+
+// EnvVarSource represents a source for the value of an EnvVar.
+message EnvVarSource {
+ // Selects a field of the pod; only name and namespace are supported.
+ optional ObjectFieldSelector fieldRef = 1;
+
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ optional ResourceFieldSelector resourceFieldRef = 2;
+
+ // Selects a key of a ConfigMap.
+ optional ConfigMapKeySelector configMapKeyRef = 3;
+
+ // Selects a key of a secret in the pod's namespace
+ optional SecretKeySelector secretKeyRef = 4;
+}
+
+// Event is a report of an event somewhere in the cluster.
+// TODO: Decide whether to store these separately or with the object they apply to.
+message Event {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // The object that this event is about.
+ optional ObjectReference involvedObject = 2;
+
+ // This should be a short, machine understandable string that gives the reason
+ // for the transition into the object's current status.
+ // TODO: provide exact specification for format.
+ optional string reason = 3;
+
+ // A human-readable description of the status of this operation.
+ // TODO: decide on maximum length.
+ optional string message = 4;
+
+ // The component reporting this event. Should be a short machine understandable string.
+ optional EventSource source = 5;
+
+ // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time firstTimestamp = 6;
+
+ // The time at which the most recent occurrence of this event was recorded.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTimestamp = 7;
+
+ // The number of times this event has occurred.
+ optional int32 count = 8;
+
+ // Type of this event (Normal, Warning), new types could be added in the future
+ optional string type = 9;
+}
+
+// EventList is a list of events.
+message EventList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of events
+ repeated Event items = 2;
+}
+
+// EventSource contains information for an event.
+message EventSource {
+ // Component from which the event is generated.
+ optional string component = 1;
+
+ // Host name on which the event is generated.
+ optional string host = 2;
+}
+
+// ExecAction describes a "run in container" action.
+message ExecAction {
+ // Command is the command line to execute inside the container, the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ // Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ repeated string command = 1;
+}
+
+// ExportOptions is the query options to the standard REST get call.
+message ExportOptions {
+ // Should this value be exported? Export strips fields that a user cannot specify.
+ optional bool export = 1;
+
+ // Should the export be exact? Exact export maintains cluster-specific fields like 'Namespace'.
+ optional bool exact = 2;
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+message FCVolumeSource {
+ // Required: FC target world wide names (WWNs)
+ repeated string targetWWNs = 1;
+
+ // Required: FC target lun number
+ optional int32 lun = 2;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ optional string fsType = 3;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ optional bool readOnly = 4;
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec-based plugin. This is an alpha feature and may change in the future.
+message FlexVolumeSource {
+ // Driver is the name of the driver to use for this volume.
+ optional string driver = 1;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ optional string fsType = 2;
+
+ // Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ optional LocalObjectReference secretRef = 3;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ optional bool readOnly = 4;
+
+ // Optional: Extra command options if any.
+ map<string, string> options = 5;
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+message FlockerVolumeSource {
+ // Required: the volume name. This is stored as metadata -> name on the payload for Flocker.
+ optional string datasetName = 1;
+}
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+message GCEPersistentDiskVolumeSource {
+ // Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ optional string pdName = 1;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ optional string fsType = 2;
+
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ optional int32 partition = 3;
+
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ optional bool readOnly = 4;
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+message GitRepoVolumeSource {
+ // Repository URL
+ optional string repository = 1;
+
+ // Commit hash for the specified revision.
+ optional string revision = 2;
+
+ // Target directory name.
+ // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ // git repository. Otherwise, if specified, the volume will contain the git repository in
+ // the subdirectory with the given name.
+ optional string directory = 3;
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsVolumeSource {
+ // EndpointsName is the endpoint name that details Glusterfs topology.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ optional string endpoints = 1;
+
+ // Path is the Glusterfs volume path.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ optional string path = 2;
+
+ // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ optional bool readOnly = 3;
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+message HTTPGetAction {
+ // Path to access on the HTTP server.
+ optional string path = 1;
+
+ // Name or number of the port to access on the container.
+ // Number must be in the range 1 to 65535.
+ // Name must be an IANA_SVC_NAME.
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2;
+
+ // Host name to connect to, defaults to the pod IP. You probably want to set
+ // "Host" in httpHeaders instead.
+ optional string host = 3;
+
+ // Scheme to use for connecting to the host.
+ // Defaults to HTTP.
+ optional string scheme = 4;
+
+ // Custom headers to set in the request. HTTP allows repeated headers.
+ repeated HTTPHeader httpHeaders = 5;
+}
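+
+// Illustrative only (path, port, and header are hypothetical):
+//   {"path": "/healthz", "port": 8080, "scheme": "HTTP",
+//    "httpHeaders": [{"name": "X-Probe", "value": "1"}]}
+// describes a GET request to http://<pod IP>:8080/healthz carrying the extra
+// header, since host defaults to the pod IP when unset.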
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+message HTTPHeader {
+ // The header field name
+ optional string name = 1;
+
+ // The header field value
+ optional string value = 2;
+}
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+message Handler {
+ // One and only one of the following should be specified.
+ // Exec specifies the action to take.
+ optional ExecAction exec = 1;
+
+ // HTTPGet specifies the http request to perform.
+ optional HTTPGetAction httpGet = 2;
+
+ // TCPSocket specifies an action involving a TCP port.
+ // TCP hooks not yet supported
+ // TODO: implement a realistic TCP lifecycle hook
+ optional TCPSocketAction tcpSocket = 3;
+}
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+message HostPathVolumeSource {
+ // Path of the directory on the host.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+ optional string path = 1;
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+message ISCSIVolumeSource {
+ // iSCSI target portal. The portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ optional string targetPortal = 1;
+
+ // Target iSCSI Qualified Name.
+ optional string iqn = 2;
+
+ // iSCSI target lun number.
+ optional int32 lun = 3;
+
+ // Optional: iSCSI interface name that uses an iSCSI transport. Defaults to 'default' (tcp).
+ optional string iscsiInterface = 4;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ optional string fsType = 5;
+
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ optional bool readOnly = 6;
+}
+
+// Maps a string key to a path within a volume.
+message KeyToPath {
+ // The key to project.
+ optional string key = 1;
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ optional string path = 2;
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+message Lifecycle {
+ // PostStart is called immediately after a container is created. If the handler fails,
+ // the container is terminated and restarted according to its restart policy.
+ // Other management of the container blocks until the hook completes.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details
+ optional Handler postStart = 1;
+
+ // PreStop is called immediately before a container is terminated.
+ // The container is terminated after the handler completes.
+ // The reason for termination is passed to the handler.
+ // Regardless of the outcome of the handler, the container is eventually terminated.
+ // Other management of the container blocks until the hook completes.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details
+ optional Handler preStop = 2;
+}
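+
+// Illustrative only (the command is hypothetical): a preStop handler of
+//   {"exec": {"command": ["/bin/sh", "-c", "nginx -s quit"]}}
+// is run inside the container immediately before termination, and the container
+// is only terminated after the handler completes, regardless of its outcome.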
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+message LimitRange {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the limits enforced.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional LimitRangeSpec spec = 2;
+}
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+message LimitRangeItem {
+ // Type of resource that this limit applies to.
+ optional string type = 1;
+
+ // Max usage constraints on this kind by resource name.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> max = 2;
+
+ // Min usage constraints on this kind by resource name.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> min = 3;
+
+ // Default resource requirement limit value by resource name if resource limit is omitted.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> default = 4;
+
+ // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> defaultRequest = 5;
+
+ // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+}
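+
+// Worked example (hypothetical values): with maxLimitRequestRatio {"cpu": "4"},
+// a container requesting cpu 100m may declare a cpu limit of at most 400m,
+// because 400m / 100m = 4 satisfies the ratio, while a 500m limit (ratio 5)
+// would be rejected.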
+
+// LimitRangeList is a list of LimitRange items.
+message LimitRangeList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of LimitRange objects.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md
+ repeated LimitRange items = 2;
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+message LimitRangeSpec {
+ // Limits is the list of LimitRangeItem objects that are enforced.
+ repeated LimitRangeItem limits = 1;
+}
+
+// List holds a list of objects, which may not be known by the server.
+message List {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of objects
+ repeated k8s.io.kubernetes.pkg.runtime.RawExtension items = 2;
+}
+
+// ListOptions is the query options to a standard REST list call.
+message ListOptions {
+ // A selector to restrict the list of returned objects by their labels.
+ // Defaults to everything.
+ optional string labelSelector = 1;
+
+ // A selector to restrict the list of returned objects by their fields.
+ // Defaults to everything.
+ optional string fieldSelector = 2;
+
+ // Watch for changes to the described resources and return them as a stream of
+ // add, update, and remove notifications. Specify resourceVersion.
+ optional bool watch = 3;
+
+ // When specified with a watch call, shows changes that occur after that particular version of a resource.
+ // Defaults to changes from the beginning of history.
+ optional string resourceVersion = 4;
+
+ // Timeout for the list/watch call.
+ optional int64 timeoutSeconds = 5;
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+message LoadBalancerIngress {
+ // IP is set for load-balancer ingress points that are IP based
+ // (typically GCE or OpenStack load-balancers)
+ optional string ip = 1;
+
+ // Hostname is set for load-balancer ingress points that are DNS based
+ // (typically AWS load-balancers)
+ optional string hostname = 2;
+}
+
+// LoadBalancerStatus represents the status of a load-balancer.
+message LoadBalancerStatus {
+ // Ingress is a list containing ingress points for the load-balancer.
+ // Traffic intended for the service should be sent to these ingress points.
+ repeated LoadBalancerIngress ingress = 1;
+}
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+message LocalObjectReference {
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ // TODO: Add other useful fields. apiVersion, kind, uid?
+ optional string name = 1;
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+message NFSVolumeSource {
+ // Server is the hostname or IP address of the NFS server.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ optional string server = 1;
+
+ // Path that is exported by the NFS server.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ optional string path = 2;
+
+ // ReadOnly here will force
+ // the NFS export to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ optional bool readOnly = 3;
+}
+
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+message Namespace {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the behavior of the Namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional NamespaceSpec spec = 2;
+
+ // Status describes the current status of a Namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional NamespaceStatus status = 3;
+}
+
+// NamespaceList is a list of Namespaces.
+message NamespaceList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of Namespace objects in the list.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+ repeated Namespace items = 2;
+}
+
+// NamespaceSpec describes the attributes on a Namespace.
+message NamespaceSpec {
+ // Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
+ // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers
+ repeated string finalizers = 1;
+}
+
+// NamespaceStatus is information about the current status of a Namespace.
+message NamespaceStatus {
+ // Phase is the current lifecycle phase of the namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases
+ optional string phase = 1;
+}
+
+// Node is a worker node in Kubernetes, formerly known as minion.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
+message Node {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the behavior of a node.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional NodeSpec spec = 2;
+
+ // Most recently observed status of the node.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional NodeStatus status = 3;
+}
+
+// NodeAddress contains information for the node's address.
+message NodeAddress {
+ // Node address type, one of Hostname, ExternalIP or InternalIP.
+ optional string type = 1;
+
+ // The node address.
+ optional string address = 2;
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+message NodeAffinity {
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // may or may not try to eventually evict the pod from its node.
+ optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node matches the corresponding matchExpressions; the
+ // node(s) with the highest sum are the most preferred.
+ repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
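+
+// Worked example (labels and weights are hypothetical): with preferred terms
+//   {weight: 10, matchExpressions: disk=ssd} and {weight: 5, matchExpressions: zone=a},
+// a node labeled disk=ssd,zone=a scores 10 + 5 = 15 and a node labeled only
+// disk=ssd scores 10; among nodes that already meet the required constraints,
+// the higher-scoring node is preferred.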
+
+// NodeCondition contains condition information for a node.
+message NodeCondition {
+ // Type of node condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time we got an update on a given condition.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastHeartbeatTime = 3;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+ // (brief) reason for the condition's last transition.
+ optional string reason = 5;
+
+ // Human readable message indicating details about last transition.
+ optional string message = 6;
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+message NodeDaemonEndpoints {
+ // Endpoint on which Kubelet is listening.
+ optional DaemonEndpoint kubeletEndpoint = 1;
+}
+
+// NodeList is the whole list of all Nodes which have been registered with master.
+message NodeList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of nodes
+ repeated Node items = 2;
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+message NodeProxyOptions {
+ // Path is the URL path to use for the current proxy request to node.
+ optional string path = 1;
+}
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+message NodeSelector {
+ // Required. A list of node selector terms. The terms are ORed.
+ repeated NodeSelectorTerm nodeSelectorTerms = 1;
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+message NodeSelectorRequirement {
+ // The label key that the selector applies to.
+ optional string key = 1;
+
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ optional string operator = 2;
+
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ repeated string values = 3;
+}
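+
+// Illustrative only (label keys and values are hypothetical):
+//   {key: "zone", operator: "In", values: ["us-east-1a", "us-east-1b"]}
+// matches nodes whose "zone" label carries either value, while
+//   {key: "gpu", operator: "Exists", values: []}
+// matches any node that has a "gpu" label, whatever its value.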
+
+// A null or empty node selector term matches no objects.
+message NodeSelectorTerm {
+ // Required. A list of node selector requirements. The requirements are ANDed.
+ repeated NodeSelectorRequirement matchExpressions = 1;
+}
+
+// NodeSpec describes the attributes that a node is created with.
+message NodeSpec {
+ // PodCIDR represents the pod IP range assigned to the node.
+ optional string podCIDR = 1;
+
+ // External ID of the node assigned by some machine database (e.g. a cloud provider).
+ // Deprecated.
+ optional string externalID = 2;
+
+ // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+ optional string providerID = 3;
+
+ // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration
+ optional bool unschedulable = 4;
+}
+
+// NodeStatus is information about the current status of a node.
+message NodeStatus {
+ // Capacity represents the total resources of a node.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1;
+
+ // Allocatable represents the resources of a node that are available for scheduling.
+ // Defaults to Capacity.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> allocatable = 2;
+
+ // NodePhase is the recently observed lifecycle phase of the node.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase
+ optional string phase = 3;
+
+ // Conditions is an array of current observed node conditions.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition
+ repeated NodeCondition conditions = 4;
+
+ // List of addresses reachable to the node.
+ // Queried from cloud provider, if available.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses
+ repeated NodeAddress addresses = 5;
+
+ // Endpoints of daemons running on the Node.
+ optional NodeDaemonEndpoints daemonEndpoints = 6;
+
+ // Set of ids/uuids to uniquely identify the node.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info
+ optional NodeSystemInfo nodeInfo = 7;
+
+ // List of container images on this node
+ repeated ContainerImage images = 8;
+
+ // List of attachable volumes in use (mounted) by the node.
+ repeated string volumesInUse = 9;
+
+ // List of volumes that are attached to the node.
+ repeated AttachedVolume volumesAttached = 10;
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+message NodeSystemInfo {
+ // Machine ID reported by the node.
+ optional string machineID = 1;
+
+ // System UUID reported by the node.
+ optional string systemUUID = 2;
+
+ // Boot ID reported by the node.
+ optional string bootID = 3;
+
+ // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+ optional string kernelVersion = 4;
+
+ // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+ optional string osImage = 5;
+
+ // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
+ optional string containerRuntimeVersion = 6;
+
+ // Kubelet Version reported by the node.
+ optional string kubeletVersion = 7;
+
+ // KubeProxy Version reported by the node.
+ optional string kubeProxyVersion = 8;
+
+ // The Operating System reported by the node
+ optional string operatingSystem = 9;
+
+ // The Architecture reported by the node
+ optional string architecture = 10;
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+message ObjectFieldSelector {
+ // Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ optional string apiVersion = 1;
+
+ // Path of the field to select in the specified API version.
+ optional string fieldPath = 2;
+}
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+message ObjectMeta {
+ // Name must be unique within a namespace. Is required when creating resources, although
+ // some resources may allow a client to request the generation of an appropriate name
+ // automatically. Name is primarily intended for creation idempotence and configuration
+ // definition.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ optional string name = 1;
+
+ // GenerateName is an optional prefix, used by the server, to generate a unique
+ // name ONLY IF the Name field has not been provided.
+ // If this field is used, the name returned to the client will be different
+ // than the name passed. This value will also be combined with a unique suffix.
+ // The provided value has the same validation rules as the Name field,
+ // and may be truncated by the length of the suffix required to make the value
+ // unique on the server.
+ //
+ // If this field is specified and the generated name exists, the server will
+ // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+ // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+ // should retry (optionally after the time indicated in the Retry-After header).
+ //
+ // Applied only if Name is not specified.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency
+ optional string generateName = 2;
+
+ // Namespace defines the space within which each name must be unique. An empty namespace is
+ // equivalent to the "default" namespace, but "default" is the canonical representation.
+ // Not all objects are required to be scoped to a namespace - the value of this field for
+ // those objects will be empty.
+ //
+ // Must be a DNS_LABEL.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+ optional string namespace = 3;
+
+ // SelfLink is a URL representing this object.
+ // Populated by the system.
+ // Read-only.
+ optional string selfLink = 4;
+
+ // UID is the unique in time and space value for this object. It is typically generated by
+ // the server on successful creation of a resource and is not allowed to change on PUT
+ // operations.
+ //
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ optional string uid = 5;
+
+ // An opaque value that represents the internal version of this object that can
+ // be used by clients to determine when objects have changed. May be used for optimistic
+ // concurrency, change detection, and the watch operation on a resource or set of resources.
+ // Clients must treat these values as opaque and pass them unmodified back to the server.
+ // They may only be valid for a particular resource or set of resources.
+ //
+ // Populated by the system.
+ // Read-only.
+ // Value must be treated as opaque by clients.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+ optional string resourceVersion = 6;
+
+ // A sequence number representing a specific generation of the desired state.
+ // Populated by the system. Read-only.
+ optional int64 generation = 7;
+
+ // CreationTimestamp is a timestamp representing the server time when this object was
+ // created. It is not guaranteed to be set in happens-before order across separate operations.
+ // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ //
+ // Populated by the system.
+ // Read-only.
+ // Null for lists.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8;
+
+ // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+ // field is set by the server when a graceful deletion is requested by the user, and is not
+ // directly settable by a client. The resource will be deleted (no longer visible from
+ // resource lists, and not reachable by name) after the time in this field. Once set, this
+ // value may not be unset or be set further into the future, although it may be shortened
+ // or the resource may be deleted prior to this time. For example, a user may request that
+ // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
+ // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet
+ // will send a hard termination signal to the container.
+ // If not set, graceful deletion of the object has not been requested.
+ //
+ // Populated by the system when a graceful deletion is requested.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9;
+
+ // Number of seconds allowed for this object to gracefully terminate before
+ // it will be removed from the system. Only set when deletionTimestamp is also set.
+ // May only be shortened.
+ // Read-only.
+ optional int64 deletionGracePeriodSeconds = 10;
+
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects. May match selectors of replication controllers
+ // and services.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md
+ // TODO: replace map[string]string with labels.LabelSet type
+ map<string, string> labels = 11;
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md
+ map<string, string> annotations = 12;
+
+ // List of objects depended by this object. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ repeated OwnerReference ownerReferences = 13;
+
+ // Must be empty before the object is deleted from the registry. Each entry
+ // is an identifier for the responsible component that will remove the entry
+ // from the list. If the deletionTimestamp of the object is non-nil, entries
+ // in this list can only be removed.
+ repeated string finalizers = 14;
+}
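As a quick orientation aid (not part of the vendored file), the sketch below shows the JSON shape that an ObjectMeta fragment serializes to, using only fields defined in the message above; all concrete names and values are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative ObjectMeta fragment; keys follow the lowerCamelCase field
	// names in the message above (name, namespace, labels, annotations, finalizers).
	meta := map[string]interface{}{
		"name":      "example-pod", // hypothetical name
		"namespace": "default",
		"labels":    map[string]string{"app": "example"},
		"annotations": map[string]string{
			"example.com/owner": "team-a", // hypothetical annotation key
		},
		"finalizers": []string{"example.com/cleanup"},
	}
	out, _ := json.MarshalIndent(meta, "", "  ")
	fmt.Println(string(out))
}
```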
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+message ObjectReference {
+ // Kind of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional string kind = 1;
+
+ // Namespace of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+ optional string namespace = 2;
+
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ optional string name = 3;
+
+ // UID of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ optional string uid = 4;
+
+ // API version of the referent.
+ optional string apiVersion = 5;
+
+ // Specific resourceVersion to which this reference is made, if any.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+ optional string resourceVersion = 6;
+
+ // If referring to a piece of an object instead of an entire object, this string
+ // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ // For example, if the object reference is to a container within a pod, this would take on a value like:
+ // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ // the event) or if no container name is specified "spec.containers[2]" (container with
+ // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ // referencing a part of an object.
+ // TODO: this design is not final and this field is subject to change in the future.
+ optional string fieldPath = 7;
+}
+
+// OwnerReference contains enough information to let you identify an owning
+// object. Currently, an owning object must be in the same namespace, so there
+// is no namespace field.
+message OwnerReference {
+ // API version of the referent.
+ optional string apiVersion = 5;
+
+ // Kind of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional string kind = 1;
+
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ optional string name = 3;
+
+ // UID of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ optional string uid = 4;
+
+ // If true, this reference points to the managing controller.
+ optional bool controller = 6;
+}
+
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md
+message PersistentVolume {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines a specification of a persistent volume owned by the cluster.
+ // Provisioned by an administrator.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes
+ optional PersistentVolumeSpec spec = 2;
+
+ // Status represents the current information/status for the persistent volume.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes
+ optional PersistentVolumeStatus status = 3;
+}
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+message PersistentVolumeClaim {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the desired characteristics of a volume requested by a pod author.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ optional PersistentVolumeClaimSpec spec = 2;
+
+ // Status represents the current information/status of a persistent volume claim.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ optional PersistentVolumeClaimStatus status = 3;
+}
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+message PersistentVolumeClaimList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // A list of persistent volume claims.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ repeated PersistentVolumeClaim items = 2;
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+message PersistentVolumeClaimSpec {
+ // AccessModes contains the desired access modes the volume should have.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1
+ repeated string accessModes = 1;
+
+ // A label query over volumes to consider for binding.
+ optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4;
+
+ // Resources represents the minimum resources the volume should have.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
+ optional ResourceRequirements resources = 2;
+
+ // VolumeName is the binding reference to the PersistentVolume backing this claim.
+ optional string volumeName = 3;
+}
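A minimal sketch (not taken from this repository) of what a PersistentVolumeClaimSpec looks like on the wire, assuming the JSON keys match the field names above; the requested size is hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative PersistentVolumeClaimSpec fragment; quantities use the
	// usual Kubernetes string form (e.g. "8Gi").
	spec := map[string]interface{}{
		"accessModes": []string{"ReadWriteOnce"},
		"resources": map[string]interface{}{
			"requests": map[string]string{"storage": "8Gi"}, // hypothetical size
		},
		// volumeName is normally filled in by the binder once the claim is bound.
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```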
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+message PersistentVolumeClaimStatus {
+ // Phase represents the current phase of PersistentVolumeClaim.
+ optional string phase = 1;
+
+ // AccessModes contains the actual access modes the volume backing the PVC has.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1
+ repeated string accessModes = 2;
+
+ // Represents the actual resources of the underlying volume.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 3;
+}
+
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+message PersistentVolumeClaimVolumeSource {
+ // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ optional string claimName = 1;
+
+ // Will force the ReadOnly setting in VolumeMounts.
+ // Default false.
+ optional bool readOnly = 2;
+}
+
+// PersistentVolumeList is a list of PersistentVolume items.
+message PersistentVolumeList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of persistent volumes.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md
+ repeated PersistentVolume items = 2;
+}
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+message PersistentVolumeSource {
+ // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1;
+
+ // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2;
+
+ // HostPath represents a directory on the host.
+ // Provisioned by a developer or tester.
+ // This is useful for single-node development and testing only!
+ // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+ optional HostPathVolumeSource hostPath = 3;
+
+ // Glusterfs represents a Glusterfs volume that is attached to a host and
+ // exposed to the pod. Provisioned by an admin.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+ optional GlusterfsVolumeSource glusterfs = 4;
+
+ // NFS represents an NFS mount on the host. Provisioned by an admin.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ optional NFSVolumeSource nfs = 5;
+
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+ optional RBDVolumeSource rbd = 6;
+
+ // ISCSI represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ optional ISCSIVolumeSource iscsi = 7;
+
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ optional CinderVolumeSource cinder = 8;
+
+ // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ optional CephFSVolumeSource cephfs = 9;
+
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ optional FCVolumeSource fc = 10;
+
+ // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service running.
+ optional FlockerVolumeSource flocker = 11;
+
+ // FlexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec based plugin. This is an
+ // alpha feature and may change in future.
+ optional FlexVolumeSource flexVolume = 12;
+
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ optional AzureFileVolumeSource azureFile = 13;
+
+ // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
+ optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+message PersistentVolumeSpec {
+ // A description of the persistent volume's resources and capacity.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1;
+
+ // The actual volume backing the persistent volume.
+ optional PersistentVolumeSource persistentVolumeSource = 2;
+
+ // AccessModes contains all ways the volume can be mounted.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes
+ repeated string accessModes = 3;
+
+ // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+ // Expected to be non-nil when bound.
+ // claim.VolumeName is the authoritative bind between PV and PVC.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding
+ optional ObjectReference claimRef = 4;
+
+ // What happens to a persistent volume when released from its claim.
+ // Valid options are Retain (default) and Recycle.
+ // Recycling must be supported by the volume plugin underlying this persistent volume.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy
+ optional string persistentVolumeReclaimPolicy = 5;
+}
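A hedged sketch (not from the vendored file) of a PersistentVolumeSpec in JSON form, assuming the volume source fields from PersistentVolumeSource are inlined next to capacity and reclaim policy; path and size are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative PersistentVolumeSpec fragment: capacity, one volume source
	// (hostPath, single-node testing only), access modes and reclaim policy.
	spec := map[string]interface{}{
		"capacity":                      map[string]string{"storage": "8Gi"}, // hypothetical size
		"hostPath":                      map[string]string{"path": "/tmp/data"},
		"accessModes":                   []string{"ReadWriteOnce"},
		"persistentVolumeReclaimPolicy": "Retain",
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```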
+
+// PersistentVolumeStatus is the current status of a persistent volume.
+message PersistentVolumeStatus {
+ // Phase indicates if a volume is available, bound to a claim, or released by a claim.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase
+ optional string phase = 1;
+
+ // A human-readable message indicating details about why the volume is in this state.
+ optional string message = 2;
+
+ // Reason is a brief CamelCase string that describes any failure and is meant
+ // for machine parsing and tidy display in the CLI.
+ optional string reason = 3;
+}
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+message Pod {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional PodSpec spec = 2;
+
+ // Most recently observed status of the pod.
+ // This data may not be up to date.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional PodStatus status = 3;
+}
+
+// Pod affinity is a group of inter pod affinity scheduling rules.
+message PodAffinity {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+message PodAffinityTerm {
+ // A label query over a set of resources, in this case pods.
+ optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1;
+
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // nil list means "this pod's namespace," empty list means "all namespaces"
+ // The json tag here is not "omitempty" since we need to distinguish nil and empty.
+ // See https://golang.org/pkg/encoding/json/#Marshal for more details.
+ repeated string namespaces = 2;
+
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
+ // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
+ // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
+ optional string topologyKey = 3;
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+message PodAntiAffinity {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodAttachOptions {
+ // Stdin if true, redirects the standard input stream of the pod for this call.
+ // Defaults to false.
+ optional bool stdin = 1;
+
+ // Stdout if true indicates that stdout is to be redirected for the attach call.
+ // Defaults to true.
+ optional bool stdout = 2;
+
+ // Stderr if true indicates that stderr is to be redirected for the attach call.
+ // Defaults to true.
+ optional bool stderr = 3;
+
+ // TTY if true indicates that a tty will be allocated for the attach call.
+ // This is passed through the container runtime so the tty
+ // is allocated on the worker node by the container runtime.
+ // Defaults to false.
+ optional bool tty = 4;
+
+ // The container in which to execute the command.
+ // Defaults to the only container if there is only one container in the pod.
+ optional string container = 5;
+}
+
+// PodCondition contains details for the current condition of this pod.
+message PodCondition {
+ // Type is the type of the condition.
+ // Currently only Ready.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+ optional string type = 1;
+
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+ optional string status = 2;
+
+ // Last time we probed the condition.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ optional string reason = 5;
+
+ // Human-readable message indicating details about last transition.
+ optional string message = 6;
+}
+
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodExecOptions {
+ // Redirect the standard input stream of the pod for this call.
+ // Defaults to false.
+ optional bool stdin = 1;
+
+ // Redirect the standard output stream of the pod for this call.
+ // Defaults to true.
+ optional bool stdout = 2;
+
+ // Redirect the standard error stream of the pod for this call.
+ // Defaults to true.
+ optional bool stderr = 3;
+
+ // TTY if true indicates that a tty will be allocated for the exec call.
+ // Defaults to false.
+ optional bool tty = 4;
+
+ // Container in which to execute the command.
+ // Defaults to the only container if there is only one container in the pod.
+ optional string container = 5;
+
+ // Command is the remote command to execute. argv array. Not executed within a shell.
+ repeated string command = 6;
+}
+
+// PodList is a list of Pods.
+message PodList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of pods.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md
+ repeated Pod items = 2;
+}
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+message PodLogOptions {
+ // The container for which to stream logs. Defaults to the only container if there is one container in the pod.
+ optional string container = 1;
+
+ // Follow the log stream of the pod. Defaults to false.
+ optional bool follow = 2;
+
+ // Return previous terminated container logs. Defaults to false.
+ optional bool previous = 3;
+
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ optional int64 sinceSeconds = 4;
+
+ // An RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5;
+
+ // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ optional bool timestamps = 6;
+
+ // If set, the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime.
+ optional int64 tailLines = 7;
+
+ // If set, the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ optional int64 limitBytes = 8;
+}
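Since PodLogOptions are passed as query parameters on the pod's log subresource, here is a small illustrative sketch (not from this repository) of building such a request URL; the path layout and names are assumptions based on the standard core API, and the namespace/pod/container names are hypothetical.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Query parameter names mirror the PodLogOptions fields above.
	q := url.Values{}
	q.Set("container", "example") // hypothetical container name
	q.Set("follow", "true")
	q.Set("tailLines", "100")
	q.Set("timestamps", "true")
	// Assumed path layout for the core v1 API; not taken from this file.
	fmt.Println("/api/v1/namespaces/default/pods/example-pod/log?" + q.Encode())
}
```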
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+message PodProxyOptions {
+ // Path is the URL path to use for the current proxy request to pod.
+ optional string path = 1;
+}
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+message PodSecurityContext {
+ // The SELinux context to be applied to all containers.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in SecurityContext. If set in
+ // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ // takes precedence for that container.
+ optional SELinuxOptions seLinuxOptions = 1;
+
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ optional int64 runAsUser = 2;
+
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ optional bool runAsNonRoot = 3;
+
+ // A list of groups applied to the first process run in each container, in addition
+ // to the container's primary GID. If unspecified, no groups will be added to
+ // any container.
+ repeated int64 supplementalGroups = 4;
+
+ // A special supplemental group that applies to all containers in a pod.
+ // Some volume types allow the Kubelet to change the ownership of that volume
+ // to be owned by the pod:
+ //
+ // 1. The owning GID will be the FSGroup
+ // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ // 3. The permission bits are OR'd with rw-rw----
+ //
+ // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ optional int64 fsGroup = 5;
+}
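For illustration only, a minimal pod-level securityContext fragment in JSON form, using fields from the message above; the UID/GID values are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative PodSecurityContext fragment; numeric IDs are hypothetical.
	sc := map[string]interface{}{
		"runAsUser":          int64(1000),
		"runAsNonRoot":       true,
		"supplementalGroups": []int64{2000},
		"fsGroup":            int64(3000),
	}
	out, _ := json.MarshalIndent(sc, "", "  ")
	fmt.Println(string(out))
}
```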
+
+// PodSpec is a description of a pod.
+message PodSpec {
+ // List of volumes that can be mounted by containers belonging to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md
+ repeated Volume volumes = 1;
+
+ // List of containers belonging to the pod.
+ // Containers cannot currently be added or removed.
+ // There must be at least one container in a Pod.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md
+ repeated Container containers = 2;
+
+ // Restart policy for all containers within the pod.
+ // One of Always, OnFailure, Never.
+ // Default to Always.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy
+ optional string restartPolicy = 3;
+
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ // Value must be non-negative integer. The value zero indicates delete immediately.
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds between the time the processes running in the pod are
+ // sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // Defaults to 30 seconds.
+ optional int64 terminationGracePeriodSeconds = 4;
+
+ // Optional duration in seconds the pod may be active on the node relative to
+ // StartTime before the system will actively try to mark it failed and kill associated containers.
+ // Value must be a positive integer.
+ optional int64 activeDeadlineSeconds = 5;
+
+ // Set DNS policy for containers within the pod.
+ // One of 'ClusterFirst' or 'Default'.
+ // Defaults to "ClusterFirst".
+ optional string dnsPolicy = 6;
+
+ // NodeSelector is a selector which must be true for the pod to fit on a node.
+ // Selector which must match a node's labels for the pod to be scheduled on that node.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md
+ map<string, string> nodeSelector = 7;
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md
+ optional string serviceAccountName = 8;
+
+ // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ // Deprecated: Use serviceAccountName instead.
+ // +k8s:conversion-gen=false
+ optional string serviceAccount = 9;
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ optional string nodeName = 10;
+
+ // Host networking requested for this pod. Use the host's network namespace.
+ // If this option is set, the ports that will be used must be specified.
+ // Default to false.
+ // +k8s:conversion-gen=false
+ optional bool hostNetwork = 11;
+
+ // Use the host's pid namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ optional bool hostPID = 12;
+
+ // Use the host's ipc namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ optional bool hostIPC = 13;
+
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ optional PodSecurityContext securityContext = 14;
+
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+ // in the case of docker, only DockerConfig type secrets are honored.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod
+ repeated LocalObjectReference imagePullSecrets = 15;
+
+ // Specifies the hostname of the Pod.
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ optional string hostname = 16;
+
+ // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domain name at all.
+ optional string subdomain = 17;
+}
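A minimal sketch (not part of the vendored file) of a PodSpec in JSON form: one container plus the policy fields documented above. The Container message is defined elsewhere in this file; the container name and image here are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative PodSpec fragment with default-ish policies spelled out.
	spec := map[string]interface{}{
		"containers": []map[string]interface{}{
			{"name": "web", "image": "nginx:1.9"}, // hypothetical container
		},
		"restartPolicy":                 "Always",
		"dnsPolicy":                     "ClusterFirst",
		"terminationGracePeriodSeconds": int64(30),
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```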
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+message PodStatus {
+ // Current phase of the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase
+ optional string phase = 1;
+
+ // Current service state of pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+ repeated PodCondition conditions = 2;
+
+ // A human readable message indicating details about why the pod is in this condition.
+ optional string message = 3;
+
+ // A brief CamelCase message indicating details about why the pod is in this state.
+ // e.g. 'OutOfDisk'
+ optional string reason = 4;
+
+ // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+ optional string hostIP = 5;
+
+ // IP address allocated to the pod. Routable at least within the cluster.
+ // Empty if not yet allocated.
+ optional string podIP = 6;
+
+ // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 7;
+
+ // The list has one entry per container in the manifest. Each entry is currently the output
+ // of `docker inspect`.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+ repeated ContainerStatus containerStatuses = 8;
+}
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+message PodStatusResult {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Most recently observed status of the pod.
+ // This data may not be up to date.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional PodStatus status = 2;
+}
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+message PodTemplate {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Template defines the pods that will be created from this pod template.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional PodTemplateSpec template = 2;
+}
+
+// PodTemplateList is a list of PodTemplates.
+message PodTemplateList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of pod templates
+ repeated PodTemplate items = 2;
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+message PodTemplateSpec {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional PodSpec spec = 2;
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+message Preconditions {
+ // Specifies the target UID.
+ optional string uid = 1;
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+message PreferredSchedulingTerm {
+ // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ optional int32 weight = 1;
+
+ // A node selector term, associated with the corresponding weight.
+ optional NodeSelectorTerm preference = 2;
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+message Probe {
+ // The action taken to determine the health of a container
+ optional Handler handler = 1;
+
+ // Number of seconds after the container has started before liveness probes are initiated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ optional int32 initialDelaySeconds = 2;
+
+ // Number of seconds after which the probe times out.
+ // Defaults to 1 second. Minimum value is 1.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ optional int32 timeoutSeconds = 3;
+
+ // How often (in seconds) to perform the probe.
+ // Default to 10 seconds. Minimum value is 1.
+ optional int32 periodSeconds = 4;
+
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ optional int32 successThreshold = 5;
+
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // Defaults to 3. Minimum value is 1.
+ optional int32 failureThreshold = 6;
+}
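An illustrative probe fragment (not from this repository), assuming the handler fields (the Handler message is defined elsewhere in this file) are inlined next to the timing and threshold fields above; the path and port are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative liveness probe: an HTTP GET handler plus the Probe
	// timing/threshold fields documented above.
	probe := map[string]interface{}{
		"httpGet":             map[string]interface{}{"path": "/healthz", "port": 8080},
		"initialDelaySeconds": 10,
		"timeoutSeconds":      1,
		"periodSeconds":       10,
		"successThreshold":    1,
		"failureThreshold":    3,
	}
	out, _ := json.MarshalIndent(probe, "", "  ")
	fmt.Println(string(out))
}
```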
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+message RBDVolumeSource {
+ // A collection of Ceph monitors.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ repeated string monitors = 1;
+
+ // The rados image name.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ optional string image = 2;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ optional string fsType = 3;
+
+ // The rados pool name.
+ // Default is rbd.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.
+ optional string pool = 4;
+
+ // The rados user name.
+ // Default is admin.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ optional string user = 5;
+
+ // Keyring is the path to key ring for RBDUser.
+ // Default is /etc/ceph/keyring.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ optional string keyring = 6;
+
+ // SecretRef is name of the authentication secret for RBDUser. If provided
+ // overrides keyring.
+ // Default is nil.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ optional LocalObjectReference secretRef = 7;
+
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ optional bool readOnly = 8;
+}
+
+// RangeAllocation is not a public type.
+message RangeAllocation {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Range is a string that identifies the range represented by 'data'.
+ optional string range = 2;
+
+ // Data is a bit array containing all allocated addresses in the previous segment.
+ optional bytes data = 3;
+}
+
+// ReplicationController represents the configuration of a replication controller.
+message ReplicationController {
+ // If the Labels of a ReplicationController are empty, they are defaulted to
+ // be the same as the Pod(s) that the replication controller manages.
+ // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the specification of the desired behavior of the replication controller.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ReplicationControllerSpec spec = 2;
+
+ // Status is the most recently observed status of the replication controller.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ReplicationControllerStatus status = 3;
+}
+
+// ReplicationControllerList is a collection of replication controllers.
+message ReplicationControllerList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of replication controllers.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
+ repeated ReplicationController items = 2;
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+message ReplicationControllerSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ optional int32 replicas = 1;
+
+ // Selector is a label query over pods that should match the Replicas count.
+ // If Selector is empty, it is defaulted to the labels present on the Pod template.
+ // Label keys and values that must match in order to be controlled by this replication
+ // controller, if empty defaulted to labels on Pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ map<string, string> selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. This takes precedence over a TemplateRef.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ optional PodTemplateSpec template = 3;
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+message ReplicationControllerStatus {
+ // Replicas is the most recently observed number of replicas.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ optional int32 fullyLabeledReplicas = 2;
+
+ // ObservedGeneration reflects the generation of the most recently observed replication controller.
+ optional int64 observedGeneration = 3;
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+message ResourceFieldSelector {
+ // Container name: required for volumes, optional for env vars
+ optional string containerName = 1;
+
+ // Required: resource to select
+ optional string resource = 2;
+
+ // Specifies the output format of the exposed resources, defaults to "1"
+ optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3;
+}
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+message ResourceQuota {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the desired quota.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ResourceQuotaSpec spec = 2;
+
+ // Status defines the actual enforced quota and its current usage.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ResourceQuotaStatus status = 3;
+}
+
+// ResourceQuotaList is a list of ResourceQuota items.
+message ResourceQuotaList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of ResourceQuota objects.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+ repeated ResourceQuota items = 2;
+}
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+message ResourceQuotaSpec {
+ // Hard is the set of desired hard limits for each named resource.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1;
+
+ // A collection of filters that must match each object tracked by a quota.
+ // If not specified, the quota matches all objects.
+ repeated string scopes = 2;
+}
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+message ResourceQuotaStatus {
+ // Hard is the set of enforced hard limits for each named resource.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1;
+
+ // Used is the current observed total usage of the resource in the namespace.
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> used = 2;
+}
+
+// ResourceRequirements describes the compute resource requirements.
+message ResourceRequirements {
+ // Limits describes the maximum amount of compute resources allowed.
+ // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> limits = 1;
+
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value.
+ // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications
+ map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> requests = 2;
+}
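For reference, a small sketch (not from the vendored file) of how limits and requests typically look in serialized form; the quantities are hypothetical examples of the usual Kubernetes string forms (millicores for CPU, binary suffixes for memory).

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative ResourceRequirements fragment keyed by resource name.
	res := map[string]map[string]string{
		"requests": {"cpu": "100m", "memory": "64Mi"},
		"limits":   {"cpu": "500m", "memory": "128Mi"},
	}
	out, _ := json.MarshalIndent(res, "", "  ")
	fmt.Println(string(out))
}
```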
+
+// SELinuxOptions are the labels to be applied to the container
+message SELinuxOptions {
+ // User is a SELinux user label that applies to the container.
+ optional string user = 1;
+
+ // Role is a SELinux role label that applies to the container.
+ optional string role = 2;
+
+ // Type is a SELinux type label that applies to the container.
+ optional string type = 3;
+
+ // Level is SELinux level label that applies to the container.
+ optional string level = 4;
+}
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+message Secret {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN
+ // or a leading dot followed by a valid DNS_SUBDOMAIN.
+ // The serialized form of the secret data is a base64 encoded string,
+ // representing the arbitrary (possibly non-string) data value here.
+ // Described in https://tools.ietf.org/html/rfc4648#section-4
+ map<string, bytes> data = 2;
+
+ // stringData allows specifying non-binary secret data in string form.
+ // It is provided as a write-only convenience method.
+ // All keys and values are merged into the data field on write, overwriting any existing values.
+ // It is never output when reading from the API.
+ // +k8s:conversion-gen=false
+ map<string, string> stringData = 4;
+
+ // Used to facilitate programmatic handling of secret data.
+ optional string type = 3;
+}
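A hedged sketch (not part of the vendored file) contrasting the base64-encoded data field with the write-only stringData convenience described above; the key names and values are hypothetical.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Secret "data" values are base64-encoded bytes; "stringData" holds plain
	// strings that the server merges into "data" on write.
	secret := map[string]interface{}{
		"type": "Opaque",
		"data": map[string]string{
			"password": base64.StdEncoding.EncodeToString([]byte("s3cr3t")), // hypothetical value
		},
		"stringData": map[string]string{
			"username": "admin", // hypothetical value
		},
	}
	out, _ := json.MarshalIndent(secret, "", "  ")
	fmt.Println(string(out))
}
```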
+
+// SecretKeySelector selects a key of a Secret.
+message SecretKeySelector {
+ // The name of the secret in the pod's namespace to select from.
+ optional LocalObjectReference localObjectReference = 1;
+
+ // The key of the secret to select from. Must be a valid secret key.
+ optional string key = 2;
+}
+
+// SecretList is a list of Secret.
+message SecretList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of secret objects.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md
+ repeated Secret items = 2;
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+message SecretVolumeSource {
+ // Name of the secret in the pod's namespace to use.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
+ optional string secretName = 1;
+
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error. Paths must be relative and may not contain
+ // the '..' path or start with '..'.
+ repeated KeyToPath items = 2;
+}
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext. When both
+// are set, the values in SecurityContext take precedence.
+message SecurityContext {
+ // The capabilities to add/drop when running containers.
+ // Defaults to the default set of capabilities granted by the container runtime.
+ optional Capabilities capabilities = 1;
+
+ // Run container in privileged mode.
+ // Processes in privileged containers are essentially equivalent to root on the host.
+ // Defaults to false.
+ optional bool privileged = 2;
+
+ // The SELinux context to be applied to the container.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ optional SELinuxOptions seLinuxOptions = 3;
+
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ optional int64 runAsUser = 4;
+
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ optional bool runAsNonRoot = 5;
+
+ // Whether this container has a read-only root filesystem.
+ // Default is false.
+ optional bool readOnlyRootFilesystem = 6;
+}
+
+// SerializedReference is a reference to serialized object.
+message SerializedReference {
+ // The reference to an object in the system.
+ optional ObjectReference reference = 1;
+}
+
+// Service is a named abstraction of software service (for example, mysql) consisting of local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+message Service {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Spec defines the behavior of a service.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ServiceSpec spec = 2;
+
+ // Most recently observed status of the service.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ServiceStatus status = 3;
+}
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+message ServiceAccount {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional ObjectMeta metadata = 1;
+
+ // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md
+ repeated ObjectReference secrets = 2;
+
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret
+ repeated LocalObjectReference imagePullSecrets = 3;
+}
+
+// ServiceAccountList is a list of ServiceAccount objects
+message ServiceAccountList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of ServiceAccounts.
+ // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts
+ repeated ServiceAccount items = 2;
+}
+
+// ServiceList holds a list of services.
+message ServiceList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of services
+ repeated Service items = 2;
+}
+
+// ServicePort contains information on service's port.
+message ServicePort {
+ // The name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. This maps to
+ // the 'Name' field in EndpointPort objects.
+ // Optional if only one ServicePort is defined on this service.
+ optional string name = 1;
+
+ // The IP protocol for this port. Supports "TCP" and "UDP".
+ // Default is TCP.
+ optional string protocol = 2;
+
+ // The port that will be exposed by this service.
+ optional int32 port = 3;
+
+ // Number or name of the port to access on the pods targeted by the service.
+ // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ // If this is a string, it will be looked up as a named port in the
+ // target Pod's container ports. If this is not specified, the value
+ // of the 'port' field is used (an identity map).
+ // This field is ignored for services with clusterIP=None, and should be
+ // omitted or set equal to the 'port' field.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4;
+
+ // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+ // Usually assigned by the system. If specified, it will be allocated to the service
+ // if unused or else creation of the service will fail.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport
+ optional int32 nodePort = 5;
+}
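To illustrate the IntOrString targetPort described above, here is a sketch (not from this repository) of two ServicePort entries, one targeting a numeric port and one targeting a named container port; all names and numbers are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative ServicePort entries: targetPort may be a port number or a
	// named container port, per the IntOrString field above.
	ports := []map[string]interface{}{
		{"name": "http", "protocol": "TCP", "port": 80, "targetPort": 8080},
		{"name": "metrics", "protocol": "TCP", "port": 9100, "targetPort": "metrics"}, // named port
	}
	out, _ := json.MarshalIndent(ports, "", "  ")
	fmt.Println(string(out))
}
```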
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+message ServiceProxyOptions {
+ // Path is the part of the URL (including the service endpoint, suffix, and
+ // query parameters) to use for the current proxy request to the service.
+ // For example, if the whole request URL is
+ // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy,
+ // then Path is _search?q=user:kimchy.
+ optional string path = 1;
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+message ServiceSpec {
+ // The list of ports that are exposed by this service.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+ repeated ServicePort ports = 1;
+
+ // This service will route traffic to pods whose labels match this selector.
+ // Label keys and values must all match in order for a pod to receive traffic for this service.
+ // If not specified, endpoints must be manually specified and the system will not automatically manage them.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview
+ map<string, string> selector = 2;
+
+ // ClusterIP is usually assigned by the master and is the IP address of the service.
+ // If specified, it will be allocated to the service if it is unused
+ // or else creation of the service will fail.
+ // Valid values are None, empty string (""), or a valid IP address.
+ // 'None' can be specified for a headless service when proxying is not required.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+ optional string clusterIP = 3;
+
+ // Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer.
+ // Defaults to ClusterIP.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services
+ optional string type = 4;
+
+ // externalIPs is a list of IP addresses for which nodes in the cluster
+ // will also accept traffic for this service. These IPs are not managed by
+ // Kubernetes. The user is responsible for ensuring that traffic arrives
+ // at a node with this IP. A common example is external load-balancers
+ // that are not part of the Kubernetes system. A previous form of this
+ // functionality exists as the deprecatedPublicIPs field. When using this
+ // field, callers should also clear the deprecatedPublicIPs field.
+ repeated string externalIPs = 5;
+
+ // deprecatedPublicIPs is deprecated and replaced by the externalIPs field
+ // with almost the exact same semantics. This field is retained in the v1
+ // API for compatibility until at least 8/20/2016. It will be removed from
+ // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are
+ // set, deprecatedPublicIPs is used.
+ // +k8s:conversion-gen=false
+ repeated string deprecatedPublicIPs = 6;
+
+ // Supports "ClientIP" and "None". Used to maintain session affinity.
+ // Enable client IP based session affinity.
+ // Must be ClientIP or None.
+ // Defaults to None.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+ optional string sessionAffinity = 7;
+
+ // Only applies to Service Type: LoadBalancer.
+ // The load balancer will be created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ optional string loadBalancerIP = 8;
+
+ // If specified and supported by the platform, traffic through the cloud-provider
+ // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ // cloud-provider does not support the feature.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md
+ repeated string loadBalancerSourceRanges = 9;
+}
+
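
Putting the ServiceSpec fields above together, a minimal Go sketch (assuming the same vendored packages; the app=web label and port numbers are illustrative):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
    // A NodePort service selecting pods labelled app=web. ClusterIP is left
    // empty so the master assigns one; sessionAffinity "None" is the default.
    spec := v1.ServiceSpec{
        Selector: map[string]string{"app": "web"},
        Type:     v1.ServiceTypeNodePort,
        Ports: []v1.ServicePort{{
            Port:       80,
            TargetPort: intstr.FromInt(8080),
        }},
        SessionAffinity: v1.ServiceAffinityNone,
    }
    fmt.Println(spec.Type, spec.Selector)
}
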
+// ServiceStatus represents the current status of a service.
+message ServiceStatus {
+ // LoadBalancer contains the current status of the load-balancer,
+ // if one is present.
+ optional LoadBalancerStatus loadBalancer = 1;
+}
+
+// TCPSocketAction describes an action based on opening a socket
+message TCPSocketAction {
+ // Number or name of the port to access on the container.
+ // Number must be in the range 1 to 65535.
+ // Name must be an IANA_SVC_NAME.
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1;
+}
+
+// The node this Taint is attached to has the effect "effect" on
+// any pod that does not tolerate the Taint.
+message Taint {
+ // Required. The taint key to be applied to a node.
+ optional string key = 1;
+
+ // Required. The taint value corresponding to the taint key.
+ optional string value = 2;
+
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule and PreferNoSchedule.
+ optional string effect = 3;
+}
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message Toleration {
+ // Required. Key is the taint key that the toleration applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ optional string operator = 2;
+
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ optional string value = 3;
+
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and PreferNoSchedule.
+ optional string effect = 4;
+}
+
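
The matching rule described by the Taint and Toleration comments above can be illustrated with a small, self-contained sketch. This is not the scheduler's implementation, only a plain-string rendering of the documented semantics: Exists ignores the value, Equal (the default) compares it, and an empty toleration effect matches any taint effect.

package main

import "fmt"

// tolerates reports whether a toleration (tolKey, tolOp, tolValue, tolEffect)
// matches a taint (taintKey, taintValue, taintEffect), per the comments above.
func tolerates(tolKey, tolOp, tolValue, tolEffect, taintKey, taintValue, taintEffect string) bool {
    if tolKey != taintKey {
        return false
    }
    if tolEffect != "" && tolEffect != taintEffect {
        return false
    }
    switch tolOp {
    case "Exists":
        return true // wildcard for value
    case "Equal", "":
        return tolValue == taintValue
    default:
        return false
    }
}

func main() {
    fmt.Println(tolerates("dedicated", "Equal", "gpu", "NoSchedule", "dedicated", "gpu", "NoSchedule")) // true
    fmt.Println(tolerates("dedicated", "Exists", "", "", "dedicated", "infra", "NoSchedule"))           // true
}
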
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+message Volume {
+ // Volume's name.
+ // Must be a DNS_LABEL and unique within the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ optional string name = 1;
+
+ // VolumeSource represents the location and type of the mounted volume.
+ // If not specified, the Volume is implied to be an EmptyDir.
+ // This implied behavior is deprecated and will be removed in a future version.
+ optional VolumeSource volumeSource = 2;
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+message VolumeMount {
+ // This must match the Name of a Volume.
+ optional string name = 1;
+
+ // Mounted read-only if true, read-write otherwise (false or unspecified).
+ // Defaults to false.
+ optional bool readOnly = 2;
+
+ // Path within the container at which the volume should be mounted. Must
+ // not contain ':'.
+ optional string mountPath = 3;
+
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ optional string subPath = 4;
+}
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+message VolumeSource {
+ // HostPath represents a pre-existing file or directory on the host
+ // machine that is directly exposed to the container. This is generally
+ // used for system agents or other privileged things that are allowed
+ // to see the host machine. Most containers will NOT need this.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+ // ---
+ // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ // mount host directories as read/write.
+ optional HostPathVolumeSource hostPath = 1;
+
+ // EmptyDir represents a temporary directory that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir
+ optional EmptyDirVolumeSource emptyDir = 2;
+
+ // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3;
+
+ // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;
+
+ // GitRepo represents a git repository at a particular revision.
+ optional GitRepoVolumeSource gitRepo = 5;
+
+ // Secret represents a secret that should populate this volume.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
+ optional SecretVolumeSource secret = 6;
+
+ // NFS represents an NFS mount on the host that shares a pod's lifetime
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ optional NFSVolumeSource nfs = 7;
+
+ // ISCSI represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
+ optional ISCSIVolumeSource iscsi = 8;
+
+ // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+ optional GlusterfsVolumeSource glusterfs = 9;
+
+ // PersistentVolumeClaimVolumeSource represents a reference to a
+ // PersistentVolumeClaim in the same namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;
+
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+ optional RBDVolumeSource rbd = 11;
+
+ // FlexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec-based plugin. This is an
+ // alpha feature and may change in the future.
+ optional FlexVolumeSource flexVolume = 12;
+
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ optional CinderVolumeSource cinder = 13;
+
+ // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ optional CephFSVolumeSource cephfs = 14;
+
+ // Flocker represents a Flocker volume attached to a kubelet's host machine. This requires the Flocker control service to be running.
+ optional FlockerVolumeSource flocker = 15;
+
+ // DownwardAPI represents downward API data about the pod that should populate this volume.
+ optional DownwardAPIVolumeSource downwardAPI = 16;
+
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ optional FCVolumeSource fc = 17;
+
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ optional AzureFileVolumeSource azureFile = 18;
+
+ // ConfigMap represents a configMap that should populate this volume
+ optional ConfigMapVolumeSource configMap = 19;
+
+ // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+ optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;
+}
+
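
Tying Volume, VolumeMount and VolumeSource together: a minimal sketch (assuming the vendored v1 package; the name "scratch" and the mount path are illustrative) that sets exactly one VolumeSource member and mounts the volume by name:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
    // Exactly one VolumeSource member is set (EmptyDir here); the volume name
    // is what a VolumeMount refers back to.
    vol := v1.Volume{
        Name:         "scratch",
        VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
    }
    mount := v1.VolumeMount{
        Name:      vol.Name,
        MountPath: "/tmp/scratch",
        ReadOnly:  false,
    }
    fmt.Println(vol.Name, mount.MountPath)
}
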
+// Represents a vSphere volume resource.
+message VsphereVirtualDiskVolumeSource {
+ // Path that identifies vSphere volume vmdk
+ optional string volumePath = 1;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ optional string fsType = 2;
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+message WeightedPodAffinityTerm {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ optional int32 weight = 1;
+
+ // Required. A pod affinity term, associated with the corresponding weight.
+ optional PodAffinityTerm podAffinityTerm = 2;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go
new file mode 100644
index 0000000..d5ba042
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/meta/metatypes"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/types"
+)
+
+func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj }
+
+// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows
+// fast, direct access to metadata fields for API objects.
+func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace }
+func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace }
+func (meta *ObjectMeta) GetName() string { return meta.Name }
+func (meta *ObjectMeta) SetName(name string) { meta.Name = name }
+func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName }
+func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
+func (meta *ObjectMeta) GetUID() types.UID { return meta.UID }
+func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid }
+func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion }
+func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
+func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink }
+func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
+func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp }
+func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) {
+ meta.CreationTimestamp = creationTimestamp
+}
+func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp }
+func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) {
+ meta.DeletionTimestamp = deletionTimestamp
+}
+func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels }
+func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
+func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }
+func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
+func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers }
+func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers }
+
+func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference {
+ ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences))
+ for i := 0; i < len(meta.OwnerReferences); i++ {
+ ret[i].Kind = meta.OwnerReferences[i].Kind
+ ret[i].Name = meta.OwnerReferences[i].Name
+ ret[i].UID = meta.OwnerReferences[i].UID
+ ret[i].APIVersion = meta.OwnerReferences[i].APIVersion
+ if meta.OwnerReferences[i].Controller != nil {
+ value := *meta.OwnerReferences[i].Controller
+ ret[i].Controller = &value
+ }
+ }
+ return ret
+}
+
+func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) {
+ newReferences := make([]OwnerReference, len(references))
+ for i := 0; i < len(references); i++ {
+ newReferences[i].Kind = references[i].Kind
+ newReferences[i].Name = references[i].Name
+ newReferences[i].UID = references[i].UID
+ newReferences[i].APIVersion = references[i].APIVersion
+ if references[i].Controller != nil {
+ value := *references[i].Controller
+ newReferences[i].Controller = &value
+ }
+ }
+ meta.OwnerReferences = newReferences
+}
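
The accessors defined above give generic read/write access to ObjectMeta, and SetOwnerReferences/GetOwnerReferences copy the Controller pointer rather than aliasing it. A minimal usage sketch, assuming the vendored import paths (object names are illustrative):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/meta/metatypes"
    "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
    om := &v1.ObjectMeta{Name: "demo", Namespace: "default"}

    // Generic access to metadata fields via the accessors above.
    om.SetLabels(map[string]string{"app": "demo"})
    fmt.Println(om.GetName(), om.GetLabels())

    // SetOwnerReferences/GetOwnerReferences convert to and from the
    // metatypes form, copying the Controller pointer rather than sharing it.
    controller := true
    om.SetOwnerReferences([]metatypes.OwnerReference{{
        APIVersion: "v1",
        Kind:       "ReplicationController",
        Name:       "demo-rc",
        Controller: &controller,
    }})
    refs := om.GetOwnerReferences()
    fmt.Println(len(refs), *refs[0].Controller)
}
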
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/register.go
new file mode 100644
index 0000000..8c625b8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/register.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+ addConversionFuncs(scheme)
+ addDefaultingFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Pod{},
+ &PodList{},
+ &PodStatusResult{},
+ &PodTemplate{},
+ &PodTemplateList{},
+ &ReplicationController{},
+ &ReplicationControllerList{},
+ &Service{},
+ &ServiceProxyOptions{},
+ &ServiceList{},
+ &Endpoints{},
+ &EndpointsList{},
+ &Node{},
+ &NodeList{},
+ &NodeProxyOptions{},
+ &Binding{},
+ &Event{},
+ &EventList{},
+ &List{},
+ &LimitRange{},
+ &LimitRangeList{},
+ &ResourceQuota{},
+ &ResourceQuotaList{},
+ &Namespace{},
+ &NamespaceList{},
+ &Secret{},
+ &SecretList{},
+ &ServiceAccount{},
+ &ServiceAccountList{},
+ &PersistentVolume{},
+ &PersistentVolumeList{},
+ &PersistentVolumeClaim{},
+ &PersistentVolumeClaimList{},
+ &DeleteOptions{},
+ &ExportOptions{},
+ &ListOptions{},
+ &PodAttachOptions{},
+ &PodLogOptions{},
+ &PodExecOptions{},
+ &PodProxyOptions{},
+ &ComponentStatus{},
+ &ComponentStatusList{},
+ &SerializedReference{},
+ &RangeAllocation{},
+ &ConfigMap{},
+ &ConfigMapList{},
+ )
+
+ // Add common types
+ scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{})
+
+ // Add the watch version that applies
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
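
AddToScheme above wires the v1 types into a runtime.Scheme. A minimal sketch of using it, assuming the vendored runtime and unversioned packages behave as at this revision (in particular that Scheme.New takes a GroupVersionKind):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/runtime"
)

func main() {
    // Register the v1 types on a fresh scheme, then ask the scheme to build
    // an object from its group/version/kind.
    scheme := runtime.NewScheme()
    v1.AddToScheme(scheme)

    obj, err := scheme.New(unversioned.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"})
    if err != nil {
        panic(err)
    }
    fmt.Printf("%T\n", obj) // expected to be *v1.Pod
}
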
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go
new file mode 100644
index 0000000..93c45da
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go
@@ -0,0 +1,60001 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg3_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg5_runtime "k8s.io/kubernetes/pkg/runtime"
+ pkg1_types "k8s.io/kubernetes/pkg/types"
+ pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg3_resource.Quantity
+ var v1 pkg2_unversioned.Time
+ var v2 pkg5_runtime.RawExtension
+ var v3 pkg1_types.UID
+ var v4 pkg4_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ yyq2[1] = x.GenerateName != ""
+ yyq2[2] = x.Namespace != ""
+ yyq2[3] = x.SelfLink != ""
+ yyq2[4] = x.UID != ""
+ yyq2[5] = x.ResourceVersion != ""
+ yyq2[6] = x.Generation != 0
+ yyq2[7] = true
+ yyq2[8] = x.DeletionTimestamp != nil
+ yyq2[9] = x.DeletionGracePeriodSeconds != nil
+ yyq2[10] = len(x.Labels) != 0
+ yyq2[11] = len(x.Annotations) != 0
+ yyq2[12] = len(x.OwnerReferences) != 0
+ yyq2[13] = len(x.Finalizers) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("generateName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selfLink"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Generation))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("generation"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Generation))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yy25 := &x.CreationTimestamp
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy25) {
+ } else if yym26 {
+ z.EncBinaryMarshal(yy25)
+ } else if !yym26 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy25)
+ } else {
+ z.EncFallback(yy25)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy27 := &x.CreationTimestamp
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy27) {
+ } else if yym28 {
+ z.EncBinaryMarshal(yy27)
+ } else if !yym28 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy27)
+ } else {
+ z.EncFallback(yy27)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.DeletionTimestamp == nil {
+ r.EncodeNil()
+ } else {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) {
+ } else if yym30 {
+ z.EncBinaryMarshal(x.DeletionTimestamp)
+ } else if !yym30 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.DeletionTimestamp)
+ } else {
+ z.EncFallback(x.DeletionTimestamp)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DeletionTimestamp == nil {
+ r.EncodeNil()
+ } else {
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) {
+ } else if yym31 {
+ z.EncBinaryMarshal(x.DeletionTimestamp)
+ } else if !yym31 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.DeletionTimestamp)
+ } else {
+ z.EncFallback(x.DeletionTimestamp)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.DeletionGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy33 := *x.DeletionGracePeriodSeconds
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeInt(int64(yy33))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DeletionGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy35 := *x.DeletionGracePeriodSeconds
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeInt(int64(yy35))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.Labels == nil {
+ r.EncodeNil()
+ } else {
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Labels, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("labels"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Labels == nil {
+ r.EncodeNil()
+ } else {
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Labels, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.Annotations == nil {
+ r.EncodeNil()
+ } else {
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Annotations, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("annotations"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Annotations == nil {
+ r.EncodeNil()
+ } else {
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Annotations, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.OwnerReferences == nil {
+ r.EncodeNil()
+ } else {
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ownerReferences"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.OwnerReferences == nil {
+ r.EncodeNil()
+ } else {
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Finalizers, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("finalizers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym48 := z.EncBinary()
+ _ = yym48
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Finalizers, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "generateName":
+ if r.TryDecodeAsNil() {
+ x.GenerateName = ""
+ } else {
+ x.GenerateName = string(r.DecodeString())
+ }
+ case "namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ case "selfLink":
+ if r.TryDecodeAsNil() {
+ x.SelfLink = ""
+ } else {
+ x.SelfLink = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ case "resourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "generation":
+ if r.TryDecodeAsNil() {
+ x.Generation = 0
+ } else {
+ x.Generation = int64(r.DecodeInt(64))
+ }
+ case "creationTimestamp":
+ if r.TryDecodeAsNil() {
+ x.CreationTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv11 := &x.CreationTimestamp
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(yyv11)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ case "deletionTimestamp":
+ if r.TryDecodeAsNil() {
+ if x.DeletionTimestamp != nil {
+ x.DeletionTimestamp = nil
+ }
+ } else {
+ if x.DeletionTimestamp == nil {
+ x.DeletionTimestamp = new(pkg2_unversioned.Time)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) {
+ } else if yym14 {
+ z.DecBinaryUnmarshal(x.DeletionTimestamp)
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.DeletionTimestamp)
+ } else {
+ z.DecFallback(x.DeletionTimestamp, false)
+ }
+ }
+ case "deletionGracePeriodSeconds":
+ if r.TryDecodeAsNil() {
+ if x.DeletionGracePeriodSeconds != nil {
+ x.DeletionGracePeriodSeconds = nil
+ }
+ } else {
+ if x.DeletionGracePeriodSeconds == nil {
+ x.DeletionGracePeriodSeconds = new(int64)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "labels":
+ if r.TryDecodeAsNil() {
+ x.Labels = nil
+ } else {
+ yyv17 := &x.Labels
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv17, false, d)
+ }
+ }
+ case "annotations":
+ if r.TryDecodeAsNil() {
+ x.Annotations = nil
+ } else {
+ yyv19 := &x.Annotations
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv19, false, d)
+ }
+ }
+ case "ownerReferences":
+ if r.TryDecodeAsNil() {
+ x.OwnerReferences = nil
+ } else {
+ yyv21 := &x.OwnerReferences
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ h.decSliceOwnerReference((*[]OwnerReference)(yyv21), d)
+ }
+ }
+ case "finalizers":
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv23 := &x.Finalizers
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv23, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj25 int
+ var yyb25 bool
+ var yyhl25 bool = l >= 0
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.GenerateName = ""
+ } else {
+ x.GenerateName = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SelfLink = ""
+ } else {
+ x.SelfLink = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Generation = 0
+ } else {
+ x.Generation = int64(r.DecodeInt(64))
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CreationTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv33 := &x.CreationTimestamp
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv33) {
+ } else if yym34 {
+ z.DecBinaryUnmarshal(yyv33)
+ } else if !yym34 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv33)
+ } else {
+ z.DecFallback(yyv33, false)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DeletionTimestamp != nil {
+ x.DeletionTimestamp = nil
+ }
+ } else {
+ if x.DeletionTimestamp == nil {
+ x.DeletionTimestamp = new(pkg2_unversioned.Time)
+ }
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) {
+ } else if yym36 {
+ z.DecBinaryUnmarshal(x.DeletionTimestamp)
+ } else if !yym36 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.DeletionTimestamp)
+ } else {
+ z.DecFallback(x.DeletionTimestamp, false)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DeletionGracePeriodSeconds != nil {
+ x.DeletionGracePeriodSeconds = nil
+ }
+ } else {
+ if x.DeletionGracePeriodSeconds == nil {
+ x.DeletionGracePeriodSeconds = new(int64)
+ }
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else {
+ *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Labels = nil
+ } else {
+ yyv39 := &x.Labels
+ yym40 := z.DecBinary()
+ _ = yym40
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv39, false, d)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Annotations = nil
+ } else {
+ yyv41 := &x.Annotations
+ yym42 := z.DecBinary()
+ _ = yym42
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv41, false, d)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OwnerReferences = nil
+ } else {
+ yyv43 := &x.OwnerReferences
+ yym44 := z.DecBinary()
+ _ = yym44
+ if false {
+ } else {
+ h.decSliceOwnerReference((*[]OwnerReference)(yyv43), d)
+ }
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv45 := &x.Finalizers
+ yym46 := z.DecBinary()
+ _ = yym46
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv45, false, d)
+ }
+ }
+ for {
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj25-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
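
The generated CodecEncodeSelf/CodecDecodeSelf methods above make *ObjectMeta satisfy the ugorji codec Selfer interface, so an ordinary encoder/decoder round-trip dispatches to them. A minimal sketch, assuming the vendored github.com/ugorji/go/codec API (the object values are illustrative):

package main

import (
    "fmt"

    codec "github.com/ugorji/go/codec"
    "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
    in := v1.ObjectMeta{Name: "demo", Labels: map[string]string{"app": "demo"}}

    // Encoding goes through the generated CodecEncodeSelf above.
    var buf []byte
    if err := codec.NewEncoderBytes(&buf, &codec.JsonHandle{}).Encode(&in); err != nil {
        panic(err)
    }

    // Decoding likewise dispatches to the generated CodecDecodeSelf.
    var out v1.ObjectMeta
    if err := codec.NewDecoderBytes(buf, &codec.JsonHandle{}).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println(out.Name, out.Labels)
}
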
+func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [21]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil
+ yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil
+ yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil
+ yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil
+ yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil
+ yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil
+ yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil
+ yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil
+ yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil
+ yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil
+ yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil
+ yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil
+ yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil
+ yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil
+ yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil
+ yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil
+ yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil
+ yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil
+ yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil
+ yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(21)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ var yyn6 bool
+ if x.VolumeSource.HostPath == nil {
+ yyn6 = true
+ goto LABEL6
+ }
+ LABEL6:
+ if yyr2 || yy2arr2 {
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn9 bool
+ if x.VolumeSource.EmptyDir == nil {
+ yyn9 = true
+ goto LABEL9
+ }
+ LABEL9:
+ if yyr2 || yy2arr2 {
+ if yyn9 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("emptyDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn9 {
+ r.EncodeNil()
+ } else {
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn12 bool
+ if x.VolumeSource.GCEPersistentDisk == nil {
+ yyn12 = true
+ goto LABEL12
+ }
+ LABEL12:
+ if yyr2 || yy2arr2 {
+ if yyn12 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn12 {
+ r.EncodeNil()
+ } else {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn15 bool
+ if x.VolumeSource.AWSElasticBlockStore == nil {
+ yyn15 = true
+ goto LABEL15
+ }
+ LABEL15:
+ if yyr2 || yy2arr2 {
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn18 bool
+ if x.VolumeSource.GitRepo == nil {
+ yyn18 = true
+ goto LABEL18
+ }
+ LABEL18:
+ if yyr2 || yy2arr2 {
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gitRepo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn21 bool
+ if x.VolumeSource.Secret == nil {
+ yyn21 = true
+ goto LABEL21
+ }
+ LABEL21:
+ if yyr2 || yy2arr2 {
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secret"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn24 bool
+ if x.VolumeSource.NFS == nil {
+ yyn24 = true
+ goto LABEL24
+ }
+ LABEL24:
+ if yyr2 || yy2arr2 {
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn27 bool
+ if x.VolumeSource.ISCSI == nil {
+ yyn27 = true
+ goto LABEL27
+ }
+ LABEL27:
+ if yyr2 || yy2arr2 {
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn30 bool
+ if x.VolumeSource.Glusterfs == nil {
+ yyn30 = true
+ goto LABEL30
+ }
+ LABEL30:
+ if yyr2 || yy2arr2 {
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn33 bool
+ if x.VolumeSource.PersistentVolumeClaim == nil {
+ yyn33 = true
+ goto LABEL33
+ }
+ LABEL33:
+ if yyr2 || yy2arr2 {
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn36 bool
+ if x.VolumeSource.RBD == nil {
+ yyn36 = true
+ goto LABEL36
+ }
+ LABEL36:
+ if yyr2 || yy2arr2 {
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn39 bool
+ if x.VolumeSource.FlexVolume == nil {
+ yyn39 = true
+ goto LABEL39
+ }
+ LABEL39:
+ if yyr2 || yy2arr2 {
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn42 bool
+ if x.VolumeSource.Cinder == nil {
+ yyn42 = true
+ goto LABEL42
+ }
+ LABEL42:
+ if yyr2 || yy2arr2 {
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn45 bool
+ if x.VolumeSource.CephFS == nil {
+ yyn45 = true
+ goto LABEL45
+ }
+ LABEL45:
+ if yyr2 || yy2arr2 {
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn48 bool
+ if x.VolumeSource.Flocker == nil {
+ yyn48 = true
+ goto LABEL48
+ }
+ LABEL48:
+ if yyr2 || yy2arr2 {
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn51 bool
+ if x.VolumeSource.DownwardAPI == nil {
+ yyn51 = true
+ goto LABEL51
+ }
+ LABEL51:
+ if yyr2 || yy2arr2 {
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("downwardAPI"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn54 bool
+ if x.VolumeSource.FC == nil {
+ yyn54 = true
+ goto LABEL54
+ }
+ LABEL54:
+ if yyr2 || yy2arr2 {
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn57 bool
+ if x.VolumeSource.AzureFile == nil {
+ yyn57 = true
+ goto LABEL57
+ }
+ LABEL57:
+ if yyr2 || yy2arr2 {
+ if yyn57 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[18] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[18] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn57 {
+ r.EncodeNil()
+ } else {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn60 bool
+ if x.VolumeSource.ConfigMap == nil {
+ yyn60 = true
+ goto LABEL60
+ }
+ LABEL60:
+ if yyr2 || yy2arr2 {
+ if yyn60 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[19] {
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[19] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configMap"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn60 {
+ r.EncodeNil()
+ } else {
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn63 bool
+ if x.VolumeSource.VsphereVolume == nil {
+ yyn63 = true
+ goto LABEL63
+ }
+ LABEL63:
+ if yyr2 || yy2arr2 {
+ if yyn63 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[20] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[20] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn63 {
+ r.EncodeNil()
+ } else {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "hostPath":
+ if x.VolumeSource.HostPath == nil {
+ x.VolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "emptyDir":
+ if x.VolumeSource.EmptyDir == nil {
+ x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ case "gcePersistentDisk":
+ if x.VolumeSource.GCEPersistentDisk == nil {
+ x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if x.VolumeSource.AWSElasticBlockStore == nil {
+ x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "gitRepo":
+ if x.VolumeSource.GitRepo == nil {
+ x.VolumeSource.GitRepo = new(GitRepoVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ case "secret":
+ if x.VolumeSource.Secret == nil {
+ x.VolumeSource.Secret = new(SecretVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if x.VolumeSource.NFS == nil {
+ x.VolumeSource.NFS = new(NFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if x.VolumeSource.ISCSI == nil {
+ x.VolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if x.VolumeSource.Glusterfs == nil {
+ x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "persistentVolumeClaim":
+ if x.VolumeSource.PersistentVolumeClaim == nil {
+ x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if x.VolumeSource.RBD == nil {
+ x.VolumeSource.RBD = new(RBDVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if x.VolumeSource.FlexVolume == nil {
+ x.VolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if x.VolumeSource.Cinder == nil {
+ x.VolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if x.VolumeSource.CephFS == nil {
+ x.VolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if x.VolumeSource.Flocker == nil {
+ x.VolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "downwardAPI":
+ if x.VolumeSource.DownwardAPI == nil {
+ x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if x.VolumeSource.FC == nil {
+ x.VolumeSource.FC = new(FCVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if x.VolumeSource.AzureFile == nil {
+ x.VolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "configMap":
+ if x.VolumeSource.ConfigMap == nil {
+ x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if x.VolumeSource.VsphereVolume == nil {
+ x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj25 int
+ var yyb25 bool
+ var yyhl25 bool = l >= 0
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ if x.VolumeSource.HostPath == nil {
+ x.VolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.EmptyDir == nil {
+ x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.GCEPersistentDisk == nil {
+ x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.AWSElasticBlockStore == nil {
+ x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.GitRepo == nil {
+ x.VolumeSource.GitRepo = new(GitRepoVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Secret == nil {
+ x.VolumeSource.Secret = new(SecretVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.NFS == nil {
+ x.VolumeSource.NFS = new(NFSVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.ISCSI == nil {
+ x.VolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Glusterfs == nil {
+ x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.PersistentVolumeClaim == nil {
+ x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.RBD == nil {
+ x.VolumeSource.RBD = new(RBDVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.FlexVolume == nil {
+ x.VolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Cinder == nil {
+ x.VolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.CephFS == nil {
+ x.VolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.Flocker == nil {
+ x.VolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.DownwardAPI == nil {
+ x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.FC == nil {
+ x.VolumeSource.FC = new(FCVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.AzureFile == nil {
+ x.VolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.ConfigMap == nil {
+ x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ if x.VolumeSource.VsphereVolume == nil {
+ x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj25++
+ if yyhl25 {
+ yyb25 = yyj25 > l
+ } else {
+ yyb25 = r.CheckBreak()
+ }
+ if yyb25 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj25-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [20]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.HostPath != nil
+ yyq2[1] = x.EmptyDir != nil
+ yyq2[2] = x.GCEPersistentDisk != nil
+ yyq2[3] = x.AWSElasticBlockStore != nil
+ yyq2[4] = x.GitRepo != nil
+ yyq2[5] = x.Secret != nil
+ yyq2[6] = x.NFS != nil
+ yyq2[7] = x.ISCSI != nil
+ yyq2[8] = x.Glusterfs != nil
+ yyq2[9] = x.PersistentVolumeClaim != nil
+ yyq2[10] = x.RBD != nil
+ yyq2[11] = x.FlexVolume != nil
+ yyq2[12] = x.Cinder != nil
+ yyq2[13] = x.CephFS != nil
+ yyq2[14] = x.Flocker != nil
+ yyq2[15] = x.DownwardAPI != nil
+ yyq2[16] = x.FC != nil
+ yyq2[17] = x.AzureFile != nil
+ yyq2[18] = x.ConfigMap != nil
+ yyq2[19] = x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(20)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("emptyDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.EmptyDir == nil {
+ r.EncodeNil()
+ } else {
+ x.EmptyDir.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gitRepo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GitRepo == nil {
+ r.EncodeNil()
+ } else {
+ x.GitRepo.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secret"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Secret == nil {
+ r.EncodeNil()
+ } else {
+ x.Secret.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PersistentVolumeClaim == nil {
+ r.EncodeNil()
+ } else {
+ x.PersistentVolumeClaim.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("downwardAPI"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DownwardAPI == nil {
+ r.EncodeNil()
+ } else {
+ x.DownwardAPI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[18] {
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[18] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configMap"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ConfigMap == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMap.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[19] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[19] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hostPath":
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "emptyDir":
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ case "gcePersistentDisk":
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "gitRepo":
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ case "secret":
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "persistentVolumeClaim":
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "downwardAPI":
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "configMap":
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj24 int
+ var yyb24 bool
+ var yyhl24 bool = l >= 0
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.EmptyDir != nil {
+ x.EmptyDir = nil
+ }
+ } else {
+ if x.EmptyDir == nil {
+ x.EmptyDir = new(EmptyDirVolumeSource)
+ }
+ x.EmptyDir.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GitRepo != nil {
+ x.GitRepo = nil
+ }
+ } else {
+ if x.GitRepo == nil {
+ x.GitRepo = new(GitRepoVolumeSource)
+ }
+ x.GitRepo.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Secret != nil {
+ x.Secret = nil
+ }
+ } else {
+ if x.Secret == nil {
+ x.Secret = new(SecretVolumeSource)
+ }
+ x.Secret.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PersistentVolumeClaim != nil {
+ x.PersistentVolumeClaim = nil
+ }
+ } else {
+ if x.PersistentVolumeClaim == nil {
+ x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
+ }
+ x.PersistentVolumeClaim.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.DownwardAPI != nil {
+ x.DownwardAPI = nil
+ }
+ } else {
+ if x.DownwardAPI == nil {
+ x.DownwardAPI = new(DownwardAPIVolumeSource)
+ }
+ x.DownwardAPI.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ConfigMap != nil {
+ x.ConfigMap = nil
+ }
+ } else {
+ if x.ConfigMap == nil {
+ x.ConfigMap = new(ConfigMapVolumeSource)
+ }
+ x.ConfigMap.CodecDecodeSelf(d)
+ }
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj24++
+ if yyhl24 {
+ yyb24 = yyj24 > l
+ } else {
+ yyb24 = r.CheckBreak()
+ }
+ if yyb24 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj24-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("claimName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "claimName":
+ if r.TryDecodeAsNil() {
+ x.ClaimName = ""
+ } else {
+ x.ClaimName = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClaimName = ""
+ } else {
+ x.ClaimName = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.GCEPersistentDisk != nil
+ yyq2[1] = x.AWSElasticBlockStore != nil
+ yyq2[2] = x.HostPath != nil
+ yyq2[3] = x.Glusterfs != nil
+ yyq2[4] = x.NFS != nil
+ yyq2[5] = x.RBD != nil
+ yyq2[6] = x.ISCSI != nil
+ yyq2[7] = x.Cinder != nil
+ yyq2[8] = x.CephFS != nil
+ yyq2[9] = x.FC != nil
+ yyq2[10] = x.Flocker != nil
+ yyq2[11] = x.FlexVolume != nil
+ yyq2[12] = x.AzureFile != nil
+ yyq2[13] = x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "gcePersistentDisk":
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "hostPath":
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj18 int
+ var yyb18 bool
+ var yyhl18 bool = l >= 0
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj18-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [18]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Capacity) != 0
+ yyq2[1] = len(x.AccessModes) != 0
+ yyq2[2] = x.ClaimRef != nil
+ yyq2[3] = x.PersistentVolumeReclaimPolicy != ""
+ yyq2[4] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil
+ yyq2[5] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil
+ yyq2[6] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil
+ yyq2[7] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil
+ yyq2[8] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil
+ yyq2[9] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil
+ yyq2[10] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil
+ yyq2[11] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil
+ yyq2[12] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil
+ yyq2[13] = x.PersistentVolumeSource.FC != nil && x.FC != nil
+ yyq2[14] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil
+ yyq2[15] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil
+ yyq2[16] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil
+ yyq2[17] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(18)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("accessModes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ClaimRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ClaimRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("claimRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ClaimRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ClaimRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e)
+ }
+ }
+ var yyn15 bool
+ if x.PersistentVolumeSource.GCEPersistentDisk == nil {
+ yyn15 = true
+ goto LABEL15
+ }
+ LABEL15:
+ if yyr2 || yy2arr2 {
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn15 {
+ r.EncodeNil()
+ } else {
+ if x.GCEPersistentDisk == nil {
+ r.EncodeNil()
+ } else {
+ x.GCEPersistentDisk.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn18 bool
+ if x.PersistentVolumeSource.AWSElasticBlockStore == nil {
+ yyn18 = true
+ goto LABEL18
+ }
+ LABEL18:
+ if yyr2 || yy2arr2 {
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ r.EncodeNil()
+ } else {
+ x.AWSElasticBlockStore.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn21 bool
+ if x.PersistentVolumeSource.HostPath == nil {
+ yyn21 = true
+ goto LABEL21
+ }
+ LABEL21:
+ if yyr2 || yy2arr2 {
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ if x.HostPath == nil {
+ r.EncodeNil()
+ } else {
+ x.HostPath.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn24 bool
+ if x.PersistentVolumeSource.Glusterfs == nil {
+ yyn24 = true
+ goto LABEL24
+ }
+ LABEL24:
+ if yyr2 || yy2arr2 {
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("glusterfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ if x.Glusterfs == nil {
+ r.EncodeNil()
+ } else {
+ x.Glusterfs.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn27 bool
+ if x.PersistentVolumeSource.NFS == nil {
+ yyn27 = true
+ goto LABEL27
+ }
+ LABEL27:
+ if yyr2 || yy2arr2 {
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn27 {
+ r.EncodeNil()
+ } else {
+ if x.NFS == nil {
+ r.EncodeNil()
+ } else {
+ x.NFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn30 bool
+ if x.PersistentVolumeSource.RBD == nil {
+ yyn30 = true
+ goto LABEL30
+ }
+ LABEL30:
+ if yyr2 || yy2arr2 {
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rbd"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn30 {
+ r.EncodeNil()
+ } else {
+ if x.RBD == nil {
+ r.EncodeNil()
+ } else {
+ x.RBD.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn33 bool
+ if x.PersistentVolumeSource.ISCSI == nil {
+ yyn33 = true
+ goto LABEL33
+ }
+ LABEL33:
+ if yyr2 || yy2arr2 {
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsi"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn33 {
+ r.EncodeNil()
+ } else {
+ if x.ISCSI == nil {
+ r.EncodeNil()
+ } else {
+ x.ISCSI.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn36 bool
+ if x.PersistentVolumeSource.Cinder == nil {
+ yyn36 = true
+ goto LABEL36
+ }
+ LABEL36:
+ if yyr2 || yy2arr2 {
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cinder"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn36 {
+ r.EncodeNil()
+ } else {
+ if x.Cinder == nil {
+ r.EncodeNil()
+ } else {
+ x.Cinder.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn39 bool
+ if x.PersistentVolumeSource.CephFS == nil {
+ yyn39 = true
+ goto LABEL39
+ }
+ LABEL39:
+ if yyr2 || yy2arr2 {
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cephfs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn39 {
+ r.EncodeNil()
+ } else {
+ if x.CephFS == nil {
+ r.EncodeNil()
+ } else {
+ x.CephFS.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn42 bool
+ if x.PersistentVolumeSource.FC == nil {
+ yyn42 = true
+ goto LABEL42
+ }
+ LABEL42:
+ if yyr2 || yy2arr2 {
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fc"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn42 {
+ r.EncodeNil()
+ } else {
+ if x.FC == nil {
+ r.EncodeNil()
+ } else {
+ x.FC.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn45 bool
+ if x.PersistentVolumeSource.Flocker == nil {
+ yyn45 = true
+ goto LABEL45
+ }
+ LABEL45:
+ if yyr2 || yy2arr2 {
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flocker"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn45 {
+ r.EncodeNil()
+ } else {
+ if x.Flocker == nil {
+ r.EncodeNil()
+ } else {
+ x.Flocker.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn48 bool
+ if x.PersistentVolumeSource.FlexVolume == nil {
+ yyn48 = true
+ goto LABEL48
+ }
+ LABEL48:
+ if yyr2 || yy2arr2 {
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn48 {
+ r.EncodeNil()
+ } else {
+ if x.FlexVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.FlexVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn51 bool
+ if x.PersistentVolumeSource.AzureFile == nil {
+ yyn51 = true
+ goto LABEL51
+ }
+ LABEL51:
+ if yyr2 || yy2arr2 {
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("azureFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn51 {
+ r.EncodeNil()
+ } else {
+ if x.AzureFile == nil {
+ r.EncodeNil()
+ } else {
+ x.AzureFile.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn54 bool
+ if x.PersistentVolumeSource.VsphereVolume == nil {
+ yyn54 = true
+ goto LABEL54
+ }
+ LABEL54:
+ if yyr2 || yy2arr2 {
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn54 {
+ r.EncodeNil()
+ } else {
+ if x.VsphereVolume == nil {
+ r.EncodeNil()
+ } else {
+ x.VsphereVolume.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv4 := &x.Capacity
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "accessModes":
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv5 := &x.AccessModes
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d)
+ }
+ }
+ case "claimRef":
+ if r.TryDecodeAsNil() {
+ if x.ClaimRef != nil {
+ x.ClaimRef = nil
+ }
+ } else {
+ if x.ClaimRef == nil {
+ x.ClaimRef = new(ObjectReference)
+ }
+ x.ClaimRef.CodecDecodeSelf(d)
+ }
+ case "persistentVolumeReclaimPolicy":
+ if r.TryDecodeAsNil() {
+ x.PersistentVolumeReclaimPolicy = ""
+ } else {
+ x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString())
+ }
+ case "gcePersistentDisk":
+ if x.PersistentVolumeSource.GCEPersistentDisk == nil {
+ x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ case "awsElasticBlockStore":
+ if x.PersistentVolumeSource.AWSElasticBlockStore == nil {
+ x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ case "hostPath":
+ if x.PersistentVolumeSource.HostPath == nil {
+ x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ case "glusterfs":
+ if x.PersistentVolumeSource.Glusterfs == nil {
+ x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ case "nfs":
+ if x.PersistentVolumeSource.NFS == nil {
+ x.PersistentVolumeSource.NFS = new(NFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ case "rbd":
+ if x.PersistentVolumeSource.RBD == nil {
+ x.PersistentVolumeSource.RBD = new(RBDVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ case "iscsi":
+ if x.PersistentVolumeSource.ISCSI == nil {
+ x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ case "cinder":
+ if x.PersistentVolumeSource.Cinder == nil {
+ x.PersistentVolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ case "cephfs":
+ if x.PersistentVolumeSource.CephFS == nil {
+ x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ case "fc":
+ if x.PersistentVolumeSource.FC == nil {
+ x.PersistentVolumeSource.FC = new(FCVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ case "flocker":
+ if x.PersistentVolumeSource.Flocker == nil {
+ x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ case "flexVolume":
+ if x.PersistentVolumeSource.FlexVolume == nil {
+ x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ case "azureFile":
+ if x.PersistentVolumeSource.AzureFile == nil {
+ x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ case "vsphereVolume":
+ if x.PersistentVolumeSource.VsphereVolume == nil {
+ x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj23 int
+ var yyb23 bool
+ var yyhl23 bool = l >= 0
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv24 := &x.Capacity
+ yyv24.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv25 := &x.AccessModes
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv25), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ClaimRef != nil {
+ x.ClaimRef = nil
+ }
+ } else {
+ if x.ClaimRef == nil {
+ x.ClaimRef = new(ObjectReference)
+ }
+ x.ClaimRef.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PersistentVolumeReclaimPolicy = ""
+ } else {
+ x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString())
+ }
+ if x.PersistentVolumeSource.GCEPersistentDisk == nil {
+ x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GCEPersistentDisk != nil {
+ x.GCEPersistentDisk = nil
+ }
+ } else {
+ if x.GCEPersistentDisk == nil {
+ x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
+ }
+ x.GCEPersistentDisk.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.AWSElasticBlockStore == nil {
+ x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AWSElasticBlockStore != nil {
+ x.AWSElasticBlockStore = nil
+ }
+ } else {
+ if x.AWSElasticBlockStore == nil {
+ x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
+ }
+ x.AWSElasticBlockStore.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.HostPath == nil {
+ x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HostPath != nil {
+ x.HostPath = nil
+ }
+ } else {
+ if x.HostPath == nil {
+ x.HostPath = new(HostPathVolumeSource)
+ }
+ x.HostPath.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.Glusterfs == nil {
+ x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Glusterfs != nil {
+ x.Glusterfs = nil
+ }
+ } else {
+ if x.Glusterfs == nil {
+ x.Glusterfs = new(GlusterfsVolumeSource)
+ }
+ x.Glusterfs.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.NFS == nil {
+ x.PersistentVolumeSource.NFS = new(NFSVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NFS != nil {
+ x.NFS = nil
+ }
+ } else {
+ if x.NFS == nil {
+ x.NFS = new(NFSVolumeSource)
+ }
+ x.NFS.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.RBD == nil {
+ x.PersistentVolumeSource.RBD = new(RBDVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RBD != nil {
+ x.RBD = nil
+ }
+ } else {
+ if x.RBD == nil {
+ x.RBD = new(RBDVolumeSource)
+ }
+ x.RBD.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.ISCSI == nil {
+ x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ISCSI != nil {
+ x.ISCSI = nil
+ }
+ } else {
+ if x.ISCSI == nil {
+ x.ISCSI = new(ISCSIVolumeSource)
+ }
+ x.ISCSI.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.Cinder == nil {
+ x.PersistentVolumeSource.Cinder = new(CinderVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Cinder != nil {
+ x.Cinder = nil
+ }
+ } else {
+ if x.Cinder == nil {
+ x.Cinder = new(CinderVolumeSource)
+ }
+ x.Cinder.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.CephFS == nil {
+ x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CephFS != nil {
+ x.CephFS = nil
+ }
+ } else {
+ if x.CephFS == nil {
+ x.CephFS = new(CephFSVolumeSource)
+ }
+ x.CephFS.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.FC == nil {
+ x.PersistentVolumeSource.FC = new(FCVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FC != nil {
+ x.FC = nil
+ }
+ } else {
+ if x.FC == nil {
+ x.FC = new(FCVolumeSource)
+ }
+ x.FC.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.Flocker == nil {
+ x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Flocker != nil {
+ x.Flocker = nil
+ }
+ } else {
+ if x.Flocker == nil {
+ x.Flocker = new(FlockerVolumeSource)
+ }
+ x.Flocker.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.FlexVolume == nil {
+ x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FlexVolume != nil {
+ x.FlexVolume = nil
+ }
+ } else {
+ if x.FlexVolume == nil {
+ x.FlexVolume = new(FlexVolumeSource)
+ }
+ x.FlexVolume.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.AzureFile == nil {
+ x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AzureFile != nil {
+ x.AzureFile = nil
+ }
+ } else {
+ if x.AzureFile == nil {
+ x.AzureFile = new(AzureFileVolumeSource)
+ }
+ x.AzureFile.CodecDecodeSelf(d)
+ }
+ if x.PersistentVolumeSource.VsphereVolume == nil {
+ x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.VsphereVolume != nil {
+ x.VsphereVolume = nil
+ }
+ } else {
+ if x.VsphereVolume == nil {
+ x.VsphereVolume = new(VsphereVirtualDiskVolumeSource)
+ }
+ x.VsphereVolume.CodecDecodeSelf(d)
+ }
+ for {
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj23-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
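+// PersistentVolumeStatus codec: in map form, phase, message and reason are emitted only when
+// non-empty; in array form all three slots are always written (empty values as "").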
+func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ yyq2[1] = x.Message != ""
+ yyq2[2] = x.Reason != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumePhase(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumePhase(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
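+// PersistentVolumeList codec: metadata is always written (via the fallback ListMeta encoder),
+// items is written unconditionally (nil encodes as nil), kind/apiVersion only when non-empty.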
+func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePersistentVolume((*[]PersistentVolume)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePersistentVolume((*[]PersistentVolume)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
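+// PersistentVolumeClaim codec: metadata, spec and status are always encoded;
+// kind and apiVersion are included only when non-empty.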
+func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeClaimSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeClaimStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PersistentVolumeClaimSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PersistentVolumeClaimStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
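+// PersistentVolumeClaimList codec: same shape as PersistentVolumeList, with items
+// handled by encSlicePersistentVolumeClaim/decSlicePersistentVolumeClaim.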
+func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
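+// PersistentVolumeClaimSpec codec: accessModes, selector and volumeName are optional
+// (omitted in map form when empty/nil); resources is always encoded.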
+func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.AccessModes) != 0
+ yyq2[1] = x.Selector != nil
+ yyq2[2] = true
+ yyq2[3] = x.VolumeName != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("accessModes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.Resources
+ yy10.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Resources
+ yy12.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "accessModes":
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv4 := &x.AccessModes
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d)
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "resources":
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv8 := &x.Resources
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "volumeName":
+ if r.TryDecodeAsNil() {
+ x.VolumeName = ""
+ } else {
+ x.VolumeName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv11 := &x.AccessModes
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv11), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv15 := &x.Resources
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeName = ""
+ } else {
+ x.VolumeName = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
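+// PersistentVolumeClaimStatus codec: phase, accessModes and capacity are all optional
+// and skipped in map form when empty.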
+func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ yyq2[1] = len(x.AccessModes) != 0
+ yyq2[2] = len(x.Capacity) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("accessModes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AccessModes == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumeClaimPhase(r.DecodeString())
+ }
+ case "accessModes":
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv5 := &x.AccessModes
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d)
+ }
+ }
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv7 := &x.Capacity
+ yyv7.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PersistentVolumeClaimPhase(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AccessModes = nil
+ } else {
+ yyv10 := &x.AccessModes
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv12 := &x.Capacity
+ yyv12.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
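+// PersistentVolumeAccessMode, PersistentVolumePhase and PersistentVolumeClaimPhase are
+// string aliases; the following generated methods all use the same plain-string codec pattern.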
+func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
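+// HostPathVolumeSource codec: a single required field, "path", always encoded.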
+func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
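+// EmptyDirVolumeSource codec: a single optional field, "medium" (StorageMedium),
+// omitted in map form when empty.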
+func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Medium != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Medium.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("medium"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Medium.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "medium":
+ if r.TryDecodeAsNil() {
+ x.Medium = ""
+ } else {
+ x.Medium = StorageMedium(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Medium = ""
+ } else {
+ x.Medium = StorageMedium(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
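+// GlusterfsVolumeSource codec: "endpoints" and "path" are always encoded;
+// "readOnly" is included only when true.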
+func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("endpoints"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "endpoints":
+ if r.TryDecodeAsNil() {
+ x.EndpointsName = ""
+ } else {
+ x.EndpointsName = string(r.DecodeString())
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EndpointsName = ""
+ } else {
+ x.EndpointsName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.FSType != ""
+ yyq2[3] = x.RBDPool != ""
+ yyq2[4] = x.RadosUser != ""
+ yyq2[5] = x.Keyring != ""
+ yyq2[6] = x.SecretRef != nil
+ yyq2[7] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.CephMonitors == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.CephMonitors, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("monitors"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CephMonitors == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.CephMonitors, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("pool"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Keyring))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("keyring"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Keyring))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "monitors":
+ if r.TryDecodeAsNil() {
+ x.CephMonitors = nil
+ } else {
+ yyv4 := &x.CephMonitors
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "image":
+ if r.TryDecodeAsNil() {
+ x.RBDImage = ""
+ } else {
+ x.RBDImage = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "pool":
+ if r.TryDecodeAsNil() {
+ x.RBDPool = ""
+ } else {
+ x.RBDPool = string(r.DecodeString())
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.RadosUser = ""
+ } else {
+ x.RadosUser = string(r.DecodeString())
+ }
+ case "keyring":
+ if r.TryDecodeAsNil() {
+ x.Keyring = ""
+ } else {
+ x.Keyring = string(r.DecodeString())
+ }
+ case "secretRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CephMonitors = nil
+ } else {
+ yyv14 := &x.CephMonitors
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv14, false, d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RBDImage = ""
+ } else {
+ x.RBDImage = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RBDPool = ""
+ } else {
+ x.RBDPool = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RadosUser = ""
+ } else {
+ x.RadosUser = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Keyring = ""
+ } else {
+ x.Keyring = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumeID":
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Path != ""
+ yyq2[2] = x.User != ""
+ yyq2[3] = x.SecretFile != ""
+ yyq2[4] = x.SecretRef != nil
+ yyq2[5] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Monitors == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Monitors, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("monitors"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Monitors == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Monitors, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "monitors":
+ if r.TryDecodeAsNil() {
+ x.Monitors = nil
+ } else {
+ yyv4 := &x.Monitors
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ case "secretFile":
+ if r.TryDecodeAsNil() {
+ x.SecretFile = ""
+ } else {
+ x.SecretFile = string(r.DecodeString())
+ }
+ case "secretRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Monitors = nil
+ } else {
+ yyv12 := &x.Monitors
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv12, false, d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretFile = ""
+ } else {
+ x.SecretFile = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("datasetName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "datasetName":
+ if r.TryDecodeAsNil() {
+ x.DatasetName = ""
+ } else {
+ x.DatasetName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DatasetName = ""
+ } else {
+ x.DatasetName = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.Partition != 0
+ yyq2[3] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PDName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("pdName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PDName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("partition"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "pdName":
+ if r.TryDecodeAsNil() {
+ x.PDName = ""
+ } else {
+ x.PDName = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "partition":
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PDName = ""
+ } else {
+ x.PDName = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.SecretRef != nil
+ yyq2[3] = x.ReadOnly != false
+ yyq2[4] = len(x.Options) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Driver))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("driver"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Driver))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Options == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Options, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("options"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Options == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Options, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "driver":
+ if r.TryDecodeAsNil() {
+ x.Driver = ""
+ } else {
+ x.Driver = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "secretRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ case "options":
+ if r.TryDecodeAsNil() {
+ x.Options = nil
+ } else {
+ yyv8 := &x.Options
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv8, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Driver = ""
+ } else {
+ x.Driver = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretRef != nil {
+ x.SecretRef = nil
+ }
+ } else {
+ if x.SecretRef == nil {
+ x.SecretRef = new(LocalObjectReference)
+ }
+ x.SecretRef.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Options = nil
+ } else {
+ yyv15 := &x.Options
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv15, false, d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ yyq2[2] = x.Partition != 0
+ yyq2[3] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("partition"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Partition))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumeID":
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "partition":
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeID = ""
+ } else {
+ x.VolumeID = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Partition = 0
+ } else {
+ x.Partition = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Revision != ""
+ yyq2[2] = x.Directory != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Repository))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("repository"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Repository))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Revision))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("revision"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Revision))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Directory))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("directory"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Directory))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "repository":
+ if r.TryDecodeAsNil() {
+ x.Repository = ""
+ } else {
+ x.Repository = string(r.DecodeString())
+ }
+ case "revision":
+ if r.TryDecodeAsNil() {
+ x.Revision = ""
+ } else {
+ x.Revision = string(r.DecodeString())
+ }
+ case "directory":
+ if r.TryDecodeAsNil() {
+ x.Directory = ""
+ } else {
+ x.Directory = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Repository = ""
+ } else {
+ x.Repository = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Revision = ""
+ } else {
+ x.Revision = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Directory = ""
+ } else {
+ x.Directory = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.SecretName != ""
+ yyq2[1] = len(x.Items) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "secretName":
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv5 := &x.Items
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv9 := &x.Items
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Server))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("server"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Server))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "server":
+ if r.TryDecodeAsNil() {
+ x.Server = ""
+ } else {
+ x.Server = string(r.DecodeString())
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Server = ""
+ } else {
+ x.Server = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[3] = x.ISCSIInterface != ""
+ yyq2[4] = x.FSType != ""
+ yyq2[5] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetPortal"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IQN))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iqn"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IQN))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Lun))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lun"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Lun))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "targetPortal":
+ if r.TryDecodeAsNil() {
+ x.TargetPortal = ""
+ } else {
+ x.TargetPortal = string(r.DecodeString())
+ }
+ case "iqn":
+ if r.TryDecodeAsNil() {
+ x.IQN = ""
+ } else {
+ x.IQN = string(r.DecodeString())
+ }
+ case "lun":
+ if r.TryDecodeAsNil() {
+ x.Lun = 0
+ } else {
+ x.Lun = int32(r.DecodeInt(32))
+ }
+ case "iscsiInterface":
+ if r.TryDecodeAsNil() {
+ x.ISCSIInterface = ""
+ } else {
+ x.ISCSIInterface = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetPortal = ""
+ } else {
+ x.TargetPortal = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IQN = ""
+ } else {
+ x.IQN = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Lun = 0
+ } else {
+ x.Lun = int32(r.DecodeInt(32))
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ISCSIInterface = ""
+ } else {
+ x.ISCSIInterface = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.FSType != ""
+ yyq2[3] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.TargetWWNs == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.TargetWWNs, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetWWNs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TargetWWNs == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.TargetWWNs, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Lun == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.Lun
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(yy7))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lun"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Lun == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Lun
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "targetWWNs":
+ if r.TryDecodeAsNil() {
+ x.TargetWWNs = nil
+ } else {
+ yyv4 := &x.TargetWWNs
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "lun":
+ if r.TryDecodeAsNil() {
+ if x.Lun != nil {
+ x.Lun = nil
+ }
+ } else {
+ if x.Lun == nil {
+ x.Lun = new(int32)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int32)(x.Lun)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetWWNs = nil
+ } else {
+ yyv11 := &x.TargetWWNs
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Lun != nil {
+ x.Lun = nil
+ }
+ } else {
+ if x.Lun == nil {
+ x.Lun = new(int32)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int32)(x.Lun)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.ReadOnly != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ShareName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("shareName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ShareName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "secretName":
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ case "shareName":
+ if r.TryDecodeAsNil() {
+ x.ShareName = ""
+ } else {
+ x.ShareName = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ShareName = ""
+ } else {
+ x.ShareName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FSType != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FSType))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumePath":
+ if r.TryDecodeAsNil() {
+ x.VolumePath = ""
+ } else {
+ x.VolumePath = string(r.DecodeString())
+ }
+ case "fsType":
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumePath = ""
+ } else {
+ x.VolumePath = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSType = ""
+ } else {
+ x.FSType = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Items) != 0
+ yyq2[1] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceKeyToPath(([]KeyToPath)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv4), d)
+ }
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv8 := &x.Items
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decSliceKeyToPath((*[]KeyToPath)(yyv8), d)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ yyq2[1] = x.HostPort != 0
+ yyq2[3] = x.Protocol != ""
+ yyq2[4] = x.HostIP != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HostPort))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HostPort))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ContainerPort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ContainerPort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Protocol.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Protocol.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "hostPort":
+ if r.TryDecodeAsNil() {
+ x.HostPort = 0
+ } else {
+ x.HostPort = int32(r.DecodeInt(32))
+ }
+ case "containerPort":
+ if r.TryDecodeAsNil() {
+ x.ContainerPort = 0
+ } else {
+ x.ContainerPort = int32(r.DecodeInt(32))
+ }
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ case "hostIP":
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPort = 0
+ } else {
+ x.HostPort = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerPort = 0
+ } else {
+ x.ContainerPort = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.ReadOnly != false
+ yyq2[3] = x.SubPath != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnly"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnly))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MountPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("mountPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MountPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SubPath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SubPath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "readOnly":
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ case "mountPath":
+ if r.TryDecodeAsNil() {
+ x.MountPath = ""
+ } else {
+ x.MountPath = string(r.DecodeString())
+ }
+ case "subPath":
+ if r.TryDecodeAsNil() {
+ x.SubPath = ""
+ } else {
+ x.SubPath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnly = false
+ } else {
+ x.ReadOnly = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MountPath = ""
+ } else {
+ x.MountPath = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SubPath = ""
+ } else {
+ x.SubPath = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Value != ""
+ yyq2[2] = x.ValueFrom != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ValueFrom == nil {
+ r.EncodeNil()
+ } else {
+ x.ValueFrom.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("valueFrom"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ValueFrom == nil {
+ r.EncodeNil()
+ } else {
+ x.ValueFrom.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "valueFrom":
+ if r.TryDecodeAsNil() {
+ if x.ValueFrom != nil {
+ x.ValueFrom = nil
+ }
+ } else {
+ if x.ValueFrom == nil {
+ x.ValueFrom = new(EnvVarSource)
+ }
+ x.ValueFrom.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ValueFrom != nil {
+ x.ValueFrom = nil
+ }
+ } else {
+ if x.ValueFrom == nil {
+ x.ValueFrom = new(EnvVarSource)
+ }
+ x.ValueFrom.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.FieldRef != nil
+ yyq2[1] = x.ResourceFieldRef != nil
+ yyq2[2] = x.ConfigMapKeyRef != nil
+ yyq2[3] = x.SecretKeyRef != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ConfigMapKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMapKeyRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ConfigMapKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ConfigMapKeyRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.SecretKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretKeyRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecretKeyRef == nil {
+ r.EncodeNil()
+ } else {
+ x.SecretKeyRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "fieldRef":
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ case "resourceFieldRef":
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ case "configMapKeyRef":
+ if r.TryDecodeAsNil() {
+ if x.ConfigMapKeyRef != nil {
+ x.ConfigMapKeyRef = nil
+ }
+ } else {
+ if x.ConfigMapKeyRef == nil {
+ x.ConfigMapKeyRef = new(ConfigMapKeySelector)
+ }
+ x.ConfigMapKeyRef.CodecDecodeSelf(d)
+ }
+ case "secretKeyRef":
+ if r.TryDecodeAsNil() {
+ if x.SecretKeyRef != nil {
+ x.SecretKeyRef = nil
+ }
+ } else {
+ if x.SecretKeyRef == nil {
+ x.SecretKeyRef = new(SecretKeySelector)
+ }
+ x.SecretKeyRef.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ConfigMapKeyRef != nil {
+ x.ConfigMapKeyRef = nil
+ }
+ } else {
+ if x.ConfigMapKeyRef == nil {
+ x.ConfigMapKeyRef = new(ConfigMapKeySelector)
+ }
+ x.ConfigMapKeyRef.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecretKeyRef != nil {
+ x.SecretKeyRef = nil
+ }
+ } else {
+ if x.SecretKeyRef == nil {
+ x.SecretKeyRef = new(SecretKeySelector)
+ }
+ x.SecretKeyRef.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "fieldPath":
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ContainerName != ""
+ yyq2[2] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.Divisor
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("divisor"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Divisor
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "containerName":
+ if r.TryDecodeAsNil() {
+ x.ContainerName = ""
+ } else {
+ x.ContainerName = string(r.DecodeString())
+ }
+ case "resource":
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ case "divisor":
+ if r.TryDecodeAsNil() {
+ x.Divisor = pkg3_resource.Quantity{}
+ } else {
+ yyv6 := &x.Divisor
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerName = ""
+ } else {
+ x.ContainerName = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Divisor = pkg3_resource.Quantity{}
+ } else {
+ yyv11 := &x.Divisor
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ yyq2[2] = x.Host != ""
+ yyq2[3] = x.Scheme != ""
+ yyq2[4] = len(x.HTTPHeaders) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Port
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Port
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("host"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Scheme.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scheme"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Scheme.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.HTTPHeaders == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpHeaders"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HTTPHeaders == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv5 := &x.Port
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ case "host":
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ case "scheme":
+ if r.TryDecodeAsNil() {
+ x.Scheme = ""
+ } else {
+ x.Scheme = URIScheme(r.DecodeString())
+ }
+ case "httpHeaders":
+ if r.TryDecodeAsNil() {
+ x.HTTPHeaders = nil
+ } else {
+ yyv9 := &x.HTTPHeaders
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceHTTPHeader((*[]HTTPHeader)(yyv9), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv13 := &x.Port
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv13) {
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv13)
+ } else {
+ z.DecFallback(yyv13, false)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Scheme = ""
+ } else {
+ x.Scheme = URIScheme(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HTTPHeaders = nil
+ } else {
+ yyv17 := &x.HTTPHeaders
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decSliceHTTPHeader((*[]HTTPHeader)(yyv17), d)
+ }
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
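+// URIScheme is a string type; it is encoded and decoded as a plain UTF-8 string.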
+func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
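+// TCPSocketAction carries a single IntOrString Port field, encoded as a one-element array or as a map keyed by "port".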
+func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Port
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Port
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv4 := &x.Port
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = pkg4_intstr.IntOrString{}
+ } else {
+ yyv7 := &x.Port
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
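+// ExecAction encodes its Command slice; when the slice is empty the "command" key is omitted from map output.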
+func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Command) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("command"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "command":
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv4 := &x.Command
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv7 := &x.Command
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv7, false, d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
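+// Probe encodes its timing and threshold fields together with the embedded Handler's exec, httpGet and tcpSocket actions; zero-valued optional fields are omitted from map output.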
+func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.InitialDelaySeconds != 0
+ yyq2[1] = x.TimeoutSeconds != 0
+ yyq2[2] = x.PeriodSeconds != 0
+ yyq2[3] = x.SuccessThreshold != 0
+ yyq2[4] = x.FailureThreshold != 0
+ yyq2[5] = x.Handler.Exec != nil && x.Exec != nil
+ yyq2[6] = x.Handler.HTTPGet != nil && x.HTTPGet != nil
+ yyq2[7] = x.Handler.TCPSocket != nil && x.TCPSocket != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.InitialDelaySeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.InitialDelaySeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TimeoutSeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TimeoutSeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.PeriodSeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("periodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.PeriodSeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SuccessThreshold))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("successThreshold"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SuccessThreshold))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FailureThreshold))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failureThreshold"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FailureThreshold))
+ }
+ }
+ }
+ var yyn18 bool
+ if x.Handler.Exec == nil {
+ yyn18 = true
+ goto LABEL18
+ }
+ LABEL18:
+ if yyr2 || yy2arr2 {
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn18 {
+ r.EncodeNil()
+ } else {
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn21 bool
+ if x.Handler.HTTPGet == nil {
+ yyn21 = true
+ goto LABEL21
+ }
+ LABEL21:
+ if yyr2 || yy2arr2 {
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpGet"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn21 {
+ r.EncodeNil()
+ } else {
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ var yyn24 bool
+ if x.Handler.TCPSocket == nil {
+ yyn24 = true
+ goto LABEL24
+ }
+ LABEL24:
+ if yyr2 || yy2arr2 {
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tcpSocket"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn24 {
+ r.EncodeNil()
+ } else {
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "initialDelaySeconds":
+ if r.TryDecodeAsNil() {
+ x.InitialDelaySeconds = 0
+ } else {
+ x.InitialDelaySeconds = int32(r.DecodeInt(32))
+ }
+ case "timeoutSeconds":
+ if r.TryDecodeAsNil() {
+ x.TimeoutSeconds = 0
+ } else {
+ x.TimeoutSeconds = int32(r.DecodeInt(32))
+ }
+ case "periodSeconds":
+ if r.TryDecodeAsNil() {
+ x.PeriodSeconds = 0
+ } else {
+ x.PeriodSeconds = int32(r.DecodeInt(32))
+ }
+ case "successThreshold":
+ if r.TryDecodeAsNil() {
+ x.SuccessThreshold = 0
+ } else {
+ x.SuccessThreshold = int32(r.DecodeInt(32))
+ }
+ case "failureThreshold":
+ if r.TryDecodeAsNil() {
+ x.FailureThreshold = 0
+ } else {
+ x.FailureThreshold = int32(r.DecodeInt(32))
+ }
+ case "exec":
+ if x.Handler.Exec == nil {
+ x.Handler.Exec = new(ExecAction)
+ }
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ case "httpGet":
+ if x.Handler.HTTPGet == nil {
+ x.Handler.HTTPGet = new(HTTPGetAction)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ case "tcpSocket":
+ if x.Handler.TCPSocket == nil {
+ x.Handler.TCPSocket = new(TCPSocketAction)
+ }
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.InitialDelaySeconds = 0
+ } else {
+ x.InitialDelaySeconds = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TimeoutSeconds = 0
+ } else {
+ x.TimeoutSeconds = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PeriodSeconds = 0
+ } else {
+ x.PeriodSeconds = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SuccessThreshold = 0
+ } else {
+ x.SuccessThreshold = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FailureThreshold = 0
+ } else {
+ x.FailureThreshold = int32(r.DecodeInt(32))
+ }
+ if x.Handler.Exec == nil {
+ x.Handler.Exec = new(ExecAction)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ if x.Handler.HTTPGet == nil {
+ x.Handler.HTTPGet = new(HTTPGetAction)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ if x.Handler.TCPSocket == nil {
+ x.Handler.TCPSocket = new(TCPSocketAction)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
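+// PullPolicy is a string type; it is encoded and decoded as a plain UTF-8 string.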
+func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
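+// Capabilities encodes its Add and Drop capability lists, omitting empty lists from map output.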
+func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Add) != 0
+ yyq2[1] = len(x.Drop) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Add == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Add), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("add"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Add == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Add), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Drop == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Drop), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("drop"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Drop == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceCapability(([]Capability)(x.Drop), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "add":
+ if r.TryDecodeAsNil() {
+ x.Add = nil
+ } else {
+ yyv4 := &x.Add
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv4), d)
+ }
+ }
+ case "drop":
+ if r.TryDecodeAsNil() {
+ x.Drop = nil
+ } else {
+ yyv6 := &x.Drop
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Add = nil
+ } else {
+ yyv9 := &x.Add
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Drop = nil
+ } else {
+ yyv11 := &x.Drop
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceCapability((*[]Capability)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
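+// ResourceRequirements encodes its Limits and Requests resource lists, omitting empty ones from map output.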
+func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Limits) != 0
+ yyq2[1] = len(x.Requests) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ x.Limits.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("limits"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ x.Limits.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Requests == nil {
+ r.EncodeNil()
+ } else {
+ x.Requests.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requests"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Requests == nil {
+ r.EncodeNil()
+ } else {
+ x.Requests.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "limits":
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv4 := &x.Limits
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "requests":
+ if r.TryDecodeAsNil() {
+ x.Requests = nil
+ } else {
+ yyv5 := &x.Requests
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv7 := &x.Limits
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Requests = nil
+ } else {
+ yyv8 := &x.Requests
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
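+// Container always encodes the required name and resources fields; its other optional fields are omitted from map output when they hold zero values.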
+func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [18]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Image != ""
+ yyq2[2] = len(x.Command) != 0
+ yyq2[3] = len(x.Args) != 0
+ yyq2[4] = x.WorkingDir != ""
+ yyq2[5] = len(x.Ports) != 0
+ yyq2[6] = len(x.Env) != 0
+ yyq2[7] = true
+ yyq2[8] = len(x.VolumeMounts) != 0
+ yyq2[9] = x.LivenessProbe != nil
+ yyq2[10] = x.ReadinessProbe != nil
+ yyq2[11] = x.Lifecycle != nil
+ yyq2[12] = x.TerminationMessagePath != ""
+ yyq2[13] = x.ImagePullPolicy != ""
+ yyq2[14] = x.SecurityContext != nil
+ yyq2[15] = x.Stdin != false
+ yyq2[16] = x.StdinOnce != false
+ yyq2[17] = x.TTY != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(18)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("command"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Args == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Args, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("args"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Args == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Args, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("workingDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ h.encSliceContainerPort(([]ContainerPort)(x.Ports), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ h.encSliceContainerPort(([]ContainerPort)(x.Ports), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.Env == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ h.encSliceEnvVar(([]EnvVar)(x.Env), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("env"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Env == nil {
+ r.EncodeNil()
+ } else {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ h.encSliceEnvVar(([]EnvVar)(x.Env), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yy25 := &x.Resources
+ yy25.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy27 := &x.Resources
+ yy27.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.VolumeMounts == nil {
+ r.EncodeNil()
+ } else {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeMounts"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumeMounts == nil {
+ r.EncodeNil()
+ } else {
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.LivenessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.LivenessProbe.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("livenessProbe"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LivenessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.LivenessProbe.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ if x.ReadinessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.ReadinessProbe.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readinessProbe"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ReadinessProbe == nil {
+ r.EncodeNil()
+ } else {
+ x.ReadinessProbe.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ if x.Lifecycle == nil {
+ r.EncodeNil()
+ } else {
+ x.Lifecycle.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lifecycle"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Lifecycle == nil {
+ r.EncodeNil()
+ } else {
+ x.Lifecycle.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ x.ImagePullPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.ImagePullPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("securityContext"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ yym51 := z.EncBinary()
+ _ = yym51
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdin"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ yym54 := z.EncBinary()
+ _ = yym54
+ if false {
+ } else {
+ r.EncodeBool(bool(x.StdinOnce))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdinOnce"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym55 := z.EncBinary()
+ _ = yym55
+ if false {
+ } else {
+ r.EncodeBool(bool(x.StdinOnce))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ yym57 := z.EncBinary()
+ _ = yym57
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tty"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym58 := z.EncBinary()
+ _ = yym58
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "image":
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ case "command":
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv6 := &x.Command
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ case "args":
+ if r.TryDecodeAsNil() {
+ x.Args = nil
+ } else {
+ yyv8 := &x.Args
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ case "workingDir":
+ if r.TryDecodeAsNil() {
+ x.WorkingDir = ""
+ } else {
+ x.WorkingDir = string(r.DecodeString())
+ }
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv11 := &x.Ports
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceContainerPort((*[]ContainerPort)(yyv11), d)
+ }
+ }
+ case "env":
+ if r.TryDecodeAsNil() {
+ x.Env = nil
+ } else {
+ yyv13 := &x.Env
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEnvVar((*[]EnvVar)(yyv13), d)
+ }
+ }
+ case "resources":
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv15 := &x.Resources
+ yyv15.CodecDecodeSelf(d)
+ }
+ case "volumeMounts":
+ if r.TryDecodeAsNil() {
+ x.VolumeMounts = nil
+ } else {
+ yyv16 := &x.VolumeMounts
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.decSliceVolumeMount((*[]VolumeMount)(yyv16), d)
+ }
+ }
+ case "livenessProbe":
+ if r.TryDecodeAsNil() {
+ if x.LivenessProbe != nil {
+ x.LivenessProbe = nil
+ }
+ } else {
+ if x.LivenessProbe == nil {
+ x.LivenessProbe = new(Probe)
+ }
+ x.LivenessProbe.CodecDecodeSelf(d)
+ }
+ case "readinessProbe":
+ if r.TryDecodeAsNil() {
+ if x.ReadinessProbe != nil {
+ x.ReadinessProbe = nil
+ }
+ } else {
+ if x.ReadinessProbe == nil {
+ x.ReadinessProbe = new(Probe)
+ }
+ x.ReadinessProbe.CodecDecodeSelf(d)
+ }
+ case "lifecycle":
+ if r.TryDecodeAsNil() {
+ if x.Lifecycle != nil {
+ x.Lifecycle = nil
+ }
+ } else {
+ if x.Lifecycle == nil {
+ x.Lifecycle = new(Lifecycle)
+ }
+ x.Lifecycle.CodecDecodeSelf(d)
+ }
+ case "terminationMessagePath":
+ if r.TryDecodeAsNil() {
+ x.TerminationMessagePath = ""
+ } else {
+ x.TerminationMessagePath = string(r.DecodeString())
+ }
+ case "imagePullPolicy":
+ if r.TryDecodeAsNil() {
+ x.ImagePullPolicy = ""
+ } else {
+ x.ImagePullPolicy = PullPolicy(r.DecodeString())
+ }
+ case "securityContext":
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(SecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ case "stdin":
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ case "stdinOnce":
+ if r.TryDecodeAsNil() {
+ x.StdinOnce = false
+ } else {
+ x.StdinOnce = bool(r.DecodeBool())
+ }
+ case "tty":
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj27 int
+ var yyb27 bool
+ var yyhl27 bool = l >= 0
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv30 := &x.Command
+ yym31 := z.DecBinary()
+ _ = yym31
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv30, false, d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Args = nil
+ } else {
+ yyv32 := &x.Args
+ yym33 := z.DecBinary()
+ _ = yym33
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv32, false, d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.WorkingDir = ""
+ } else {
+ x.WorkingDir = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv35 := &x.Ports
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else {
+ h.decSliceContainerPort((*[]ContainerPort)(yyv35), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Env = nil
+ } else {
+ yyv37 := &x.Env
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else {
+ h.decSliceEnvVar((*[]EnvVar)(yyv37), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resources = ResourceRequirements{}
+ } else {
+ yyv39 := &x.Resources
+ yyv39.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeMounts = nil
+ } else {
+ yyv40 := &x.VolumeMounts
+ yym41 := z.DecBinary()
+ _ = yym41
+ if false {
+ } else {
+ h.decSliceVolumeMount((*[]VolumeMount)(yyv40), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LivenessProbe != nil {
+ x.LivenessProbe = nil
+ }
+ } else {
+ if x.LivenessProbe == nil {
+ x.LivenessProbe = new(Probe)
+ }
+ x.LivenessProbe.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ReadinessProbe != nil {
+ x.ReadinessProbe = nil
+ }
+ } else {
+ if x.ReadinessProbe == nil {
+ x.ReadinessProbe = new(Probe)
+ }
+ x.ReadinessProbe.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Lifecycle != nil {
+ x.Lifecycle = nil
+ }
+ } else {
+ if x.Lifecycle == nil {
+ x.Lifecycle = new(Lifecycle)
+ }
+ x.Lifecycle.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TerminationMessagePath = ""
+ } else {
+ x.TerminationMessagePath = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImagePullPolicy = ""
+ } else {
+ x.ImagePullPolicy = PullPolicy(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(SecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StdinOnce = false
+ } else {
+ x.StdinOnce = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ for {
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj27-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
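The generated codecDecodeSelfFromMap/codecDecodeSelfFromArray pairs above are not called directly; the ugorji codec runtime detects that these vendored API types implement codec.Selfer and dispatches to their CodecEncodeSelf/CodecDecodeSelf methods. A minimal round-trip sketch (a hypothetical helper, not part of this diff; it assumes the standard github.com/ugorji/go/codec import path that the codec1978 alias in this file refers to):

	import (
		"bytes"

		codec "github.com/ugorji/go/codec"
	)

	// roundTripContainer is illustrative only: Encode invokes in.CodecEncodeSelf,
	// and Decode invokes out.CodecDecodeSelf, which routes to the map or the
	// array decoder depending on the wire form it encounters.
	func roundTripContainer(in *Container) (*Container, error) {
		var jh codec.JsonHandle
		var buf bytes.Buffer
		if err := codec.NewEncoder(&buf, &jh).Encode(in); err != nil {
			return nil, err
		}
		out := new(Container)
		if err := codec.NewDecoder(&buf, &jh).Decode(out); err != nil {
			return nil, err
		}
		return out, nil
	}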
+func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Exec != nil
+ yyq2[1] = x.HTTPGet != nil
+ yyq2[2] = x.TCPSocket != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Exec == nil {
+ r.EncodeNil()
+ } else {
+ x.Exec.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpGet"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HTTPGet == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTPGet.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tcpSocket"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TCPSocket == nil {
+ r.EncodeNil()
+ } else {
+ x.TCPSocket.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "exec":
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ case "httpGet":
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ case "tcpSocket":
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Exec != nil {
+ x.Exec = nil
+ }
+ } else {
+ if x.Exec == nil {
+ x.Exec = new(ExecAction)
+ }
+ x.Exec.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTPGet != nil {
+ x.HTTPGet = nil
+ }
+ } else {
+ if x.HTTPGet == nil {
+ x.HTTPGet = new(HTTPGetAction)
+ }
+ x.HTTPGet.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TCPSocket != nil {
+ x.TCPSocket = nil
+ }
+ } else {
+ if x.TCPSocket == nil {
+ x.TCPSocket = new(TCPSocketAction)
+ }
+ x.TCPSocket.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.PostStart != nil
+ yyq2[1] = x.PreStop != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.PostStart == nil {
+ r.EncodeNil()
+ } else {
+ x.PostStart.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("postStart"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PostStart == nil {
+ r.EncodeNil()
+ } else {
+ x.PostStart.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreStop == nil {
+ r.EncodeNil()
+ } else {
+ x.PreStop.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preStop"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreStop == nil {
+ r.EncodeNil()
+ } else {
+ x.PreStop.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "postStart":
+ if r.TryDecodeAsNil() {
+ if x.PostStart != nil {
+ x.PostStart = nil
+ }
+ } else {
+ if x.PostStart == nil {
+ x.PostStart = new(Handler)
+ }
+ x.PostStart.CodecDecodeSelf(d)
+ }
+ case "preStop":
+ if r.TryDecodeAsNil() {
+ if x.PreStop != nil {
+ x.PreStop = nil
+ }
+ } else {
+ if x.PreStop == nil {
+ x.PreStop = new(Handler)
+ }
+ x.PreStop.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PostStart != nil {
+ x.PostStart = nil
+ }
+ } else {
+ if x.PostStart == nil {
+ x.PostStart = new(Handler)
+ }
+ x.PostStart.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PreStop != nil {
+ x.PreStop = nil
+ }
+ } else {
+ if x.PreStop == nil {
+ x.PreStop = new(Handler)
+ }
+ x.PreStop.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
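The yyq2 presence flags in the Handler and Lifecycle encoders above implement omit-empty semantics in map mode: a field whose flag is false contributes neither key nor value. A small sketch of that behaviour (hypothetical helper, reusing the imports assumed in the earlier round-trip example; the expected JSON shape additionally assumes the upstream ExecAction.Command field):

	// encodeLifecycleJSON shows the omit-empty effect: a Lifecycle with only
	// PreStop set is expected to serialise roughly as
	// {"preStop":{"exec":{"command":["sh","-c","cleanup"]}}} with postStart omitted.
	func encodeLifecycleJSON(lc *Lifecycle) (string, error) {
		var jh codec.JsonHandle
		var out []byte
		if err := codec.NewEncoderBytes(&out, &jh).Encode(lc); err != nil {
			return "", err
		}
		return string(out), nil
	}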
+func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Reason != ""
+ yyq2[1] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.StartedAt
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if yym5 {
+ z.EncBinaryMarshal(yy4)
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startedAt"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.StartedAt
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if yym7 {
+ z.EncBinaryMarshal(yy6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "startedAt":
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv4 := &x.StartedAt
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if yym5 {
+ z.DecBinaryUnmarshal(yyv4)
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv7 := &x.StartedAt
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if yym8 {
+ z.DecBinaryUnmarshal(yyv7)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Signal != 0
+ yyq2[2] = x.Reason != ""
+ yyq2[3] = x.Message != ""
+ yyq2[4] = true
+ yyq2[5] = true
+ yyq2[6] = x.ContainerID != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExitCode))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exitCode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExitCode))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Signal))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("signal"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Signal))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yy16 := &x.StartedAt
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy16) {
+ } else if yym17 {
+ z.EncBinaryMarshal(yy16)
+ } else if !yym17 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy16)
+ } else {
+ z.EncFallback(yy16)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startedAt"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy18 := &x.StartedAt
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy18) {
+ } else if yym19 {
+ z.EncBinaryMarshal(yy18)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy18)
+ } else {
+ z.EncFallback(yy18)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yy21 := &x.FinishedAt
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy21) {
+ } else if yym22 {
+ z.EncBinaryMarshal(yy21)
+ } else if !yym22 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy21)
+ } else {
+ z.EncFallback(yy21)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("finishedAt"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy23 := &x.FinishedAt
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy23) {
+ } else if yym24 {
+ z.EncBinaryMarshal(yy23)
+ } else if !yym24 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy23)
+ } else {
+ z.EncFallback(yy23)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "exitCode":
+ if r.TryDecodeAsNil() {
+ x.ExitCode = 0
+ } else {
+ x.ExitCode = int32(r.DecodeInt(32))
+ }
+ case "signal":
+ if r.TryDecodeAsNil() {
+ x.Signal = 0
+ } else {
+ x.Signal = int32(r.DecodeInt(32))
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "startedAt":
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv8 := &x.StartedAt
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "finishedAt":
+ if r.TryDecodeAsNil() {
+ x.FinishedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv10 := &x.FinishedAt
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv10) {
+ } else if yym11 {
+ z.DecBinaryUnmarshal(yyv10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv10)
+ } else {
+ z.DecFallback(yyv10, false)
+ }
+ }
+ case "containerID":
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExitCode = 0
+ } else {
+ x.ExitCode = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Signal = 0
+ } else {
+ x.Signal = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StartedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv18 := &x.StartedAt
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv18) {
+ } else if yym19 {
+ z.DecBinaryUnmarshal(yyv18)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv18)
+ } else {
+ z.DecFallback(yyv18, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FinishedAt = pkg2_unversioned.Time{}
+ } else {
+ yyv20 := &x.FinishedAt
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv20) {
+ } else if yym21 {
+ z.DecBinaryUnmarshal(yyv20)
+ } else if !yym21 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv20)
+ } else {
+ z.DecFallback(yyv20, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
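Each CodecDecodeSelf above inspects r.ContainerType() and routes to either the map decoder or the positional array decoder; the array form corresponds to the handle's StructToArray option, which is also what the yy2arr2 branches in the encoders test. A sketch of requesting that mode (hypothetical helper, same assumed imports as the earlier examples):

	// encodeTerminatedAsArray asks the encoder for the fixed-order array form
	// that codecDecodeSelfFromArray consumes, rather than the keyed map form.
	func encodeTerminatedAsArray(st *ContainerStateTerminated) ([]byte, error) {
		var jh codec.JsonHandle
		jh.StructToArray = true // option on the embedded BasicHandle/EncodeOptions
		var out []byte
		err := codec.NewEncoderBytes(&out, &jh).Encode(st)
		return out, err
	}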
+func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Waiting != nil
+ yyq2[1] = x.Running != nil
+ yyq2[2] = x.Terminated != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Waiting == nil {
+ r.EncodeNil()
+ } else {
+ x.Waiting.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("waiting"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Waiting == nil {
+ r.EncodeNil()
+ } else {
+ x.Waiting.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Running == nil {
+ r.EncodeNil()
+ } else {
+ x.Running.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("running"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Running == nil {
+ r.EncodeNil()
+ } else {
+ x.Running.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Terminated == nil {
+ r.EncodeNil()
+ } else {
+ x.Terminated.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminated"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Terminated == nil {
+ r.EncodeNil()
+ } else {
+ x.Terminated.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "waiting":
+ if r.TryDecodeAsNil() {
+ if x.Waiting != nil {
+ x.Waiting = nil
+ }
+ } else {
+ if x.Waiting == nil {
+ x.Waiting = new(ContainerStateWaiting)
+ }
+ x.Waiting.CodecDecodeSelf(d)
+ }
+ case "running":
+ if r.TryDecodeAsNil() {
+ if x.Running != nil {
+ x.Running = nil
+ }
+ } else {
+ if x.Running == nil {
+ x.Running = new(ContainerStateRunning)
+ }
+ x.Running.CodecDecodeSelf(d)
+ }
+ case "terminated":
+ if r.TryDecodeAsNil() {
+ if x.Terminated != nil {
+ x.Terminated = nil
+ }
+ } else {
+ if x.Terminated == nil {
+ x.Terminated = new(ContainerStateTerminated)
+ }
+ x.Terminated.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Waiting != nil {
+ x.Waiting = nil
+ }
+ } else {
+ if x.Waiting == nil {
+ x.Waiting = new(ContainerStateWaiting)
+ }
+ x.Waiting.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Running != nil {
+ x.Running = nil
+ }
+ } else {
+ if x.Running == nil {
+ x.Running = new(ContainerStateRunning)
+ }
+ x.Running.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Terminated != nil {
+ x.Terminated = nil
+ }
+ } else {
+ if x.Terminated == nil {
+ x.Terminated = new(ContainerStateTerminated)
+ }
+ x.Terminated.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[7] = x.ContainerID != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 5
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy7 := &x.State
+ yy7.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("state"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.State
+ yy9.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy12 := &x.LastTerminationState
+ yy12.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastState"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.LastTerminationState
+ yy14.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Ready))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ready"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Ready))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RestartCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("restartCount"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RestartCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Image))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ImageID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imageID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ImageID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "state":
+ if r.TryDecodeAsNil() {
+ x.State = ContainerState{}
+ } else {
+ yyv5 := &x.State
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "lastState":
+ if r.TryDecodeAsNil() {
+ x.LastTerminationState = ContainerState{}
+ } else {
+ yyv6 := &x.LastTerminationState
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "ready":
+ if r.TryDecodeAsNil() {
+ x.Ready = false
+ } else {
+ x.Ready = bool(r.DecodeBool())
+ }
+ case "restartCount":
+ if r.TryDecodeAsNil() {
+ x.RestartCount = 0
+ } else {
+ x.RestartCount = int32(r.DecodeInt(32))
+ }
+ case "image":
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ case "imageID":
+ if r.TryDecodeAsNil() {
+ x.ImageID = ""
+ } else {
+ x.ImageID = string(r.DecodeString())
+ }
+ case "containerID":
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.State = ContainerState{}
+ } else {
+ yyv14 := &x.State
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTerminationState = ContainerState{}
+ } else {
+ yyv15 := &x.LastTerminationState
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ready = false
+ } else {
+ x.Ready = bool(r.DecodeBool())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RestartCount = 0
+ } else {
+ x.RestartCount = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Image = ""
+ } else {
+ x.Image = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImageID = ""
+ } else {
+ x.ImageID = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerID = ""
+ } else {
+ x.ContainerID = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Status.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Status.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastProbeTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastProbeTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = PodConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ case "lastProbeTime":
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg2_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastProbeTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = PodConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg2_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastProbeTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.NodeSelectorTerms == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeSelectorTerms == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "nodeSelectorTerms":
+ if r.TryDecodeAsNil() {
+ x.NodeSelectorTerms = nil
+ } else {
+ yyv4 := &x.NodeSelectorTerms
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeSelectorTerms = nil
+ } else {
+ yyv7 := &x.NodeSelectorTerms
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchExpressions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "matchExpressions":
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv4 := &x.MatchExpressions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv7 := &x.MatchExpressions
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = len(x.Values) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("values"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = NodeSelectorOperator(r.DecodeString())
+ }
+ case "values":
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv6 := &x.Values
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = NodeSelectorOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv11 := &x.Values
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.NodeAffinity != nil
+ yyq2[1] = x.PodAffinity != nil
+ yyq2[2] = x.PodAntiAffinity != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.NodeAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.NodeAffinity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.NodeAffinity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PodAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAffinity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PodAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAffinity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.PodAntiAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAntiAffinity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PodAntiAffinity == nil {
+ r.EncodeNil()
+ } else {
+ x.PodAntiAffinity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "nodeAffinity":
+ if r.TryDecodeAsNil() {
+ if x.NodeAffinity != nil {
+ x.NodeAffinity = nil
+ }
+ } else {
+ if x.NodeAffinity == nil {
+ x.NodeAffinity = new(NodeAffinity)
+ }
+ x.NodeAffinity.CodecDecodeSelf(d)
+ }
+ case "podAffinity":
+ if r.TryDecodeAsNil() {
+ if x.PodAffinity != nil {
+ x.PodAffinity = nil
+ }
+ } else {
+ if x.PodAffinity == nil {
+ x.PodAffinity = new(PodAffinity)
+ }
+ x.PodAffinity.CodecDecodeSelf(d)
+ }
+ case "podAntiAffinity":
+ if r.TryDecodeAsNil() {
+ if x.PodAntiAffinity != nil {
+ x.PodAntiAffinity = nil
+ }
+ } else {
+ if x.PodAntiAffinity == nil {
+ x.PodAntiAffinity = new(PodAntiAffinity)
+ }
+ x.PodAntiAffinity.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NodeAffinity != nil {
+ x.NodeAffinity = nil
+ }
+ } else {
+ if x.NodeAffinity == nil {
+ x.NodeAffinity = new(NodeAffinity)
+ }
+ x.NodeAffinity.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PodAffinity != nil {
+ x.PodAffinity = nil
+ }
+ } else {
+ if x.PodAffinity == nil {
+ x.PodAffinity = new(PodAffinity)
+ }
+ x.PodAffinity.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PodAntiAffinity != nil {
+ x.PodAntiAffinity = nil
+ }
+ } else {
+ if x.PodAntiAffinity == nil {
+ x.PodAntiAffinity = new(PodAntiAffinity)
+ }
+ x.PodAntiAffinity.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0
+ yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "requiredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d)
+ }
+ }
+ case "preferredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0
+ yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "requiredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d)
+ }
+ }
+ case "preferredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("weight"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.PodAffinityTerm
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.PodAffinityTerm
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "weight":
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int32(r.DecodeInt(32))
+ }
+ case "podAffinityTerm":
+ if r.TryDecodeAsNil() {
+ x.PodAffinityTerm = PodAffinityTerm{}
+ } else {
+ yyv5 := &x.PodAffinityTerm
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int32(r.DecodeInt(32))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodAffinityTerm = PodAffinityTerm{}
+ } else {
+ yyv8 := &x.PodAffinityTerm
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.LabelSelector != nil
+ yyq2[2] = x.TopologyKey != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.LabelSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LabelSelector) {
+ } else {
+ z.EncFallback(x.LabelSelector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("labelSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LabelSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LabelSelector) {
+ } else {
+ z.EncFallback(x.LabelSelector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Namespaces == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Namespaces, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespaces"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Namespaces == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Namespaces, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("topologyKey"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "labelSelector":
+ if r.TryDecodeAsNil() {
+ if x.LabelSelector != nil {
+ x.LabelSelector = nil
+ }
+ } else {
+ if x.LabelSelector == nil {
+ x.LabelSelector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LabelSelector) {
+ } else {
+ z.DecFallback(x.LabelSelector, false)
+ }
+ }
+ case "namespaces":
+ if r.TryDecodeAsNil() {
+ x.Namespaces = nil
+ } else {
+ yyv6 := &x.Namespaces
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ case "topologyKey":
+ if r.TryDecodeAsNil() {
+ x.TopologyKey = ""
+ } else {
+ x.TopologyKey = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LabelSelector != nil {
+ x.LabelSelector = nil
+ }
+ } else {
+ if x.LabelSelector == nil {
+ x.LabelSelector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LabelSelector) {
+ } else {
+ z.DecFallback(x.LabelSelector, false)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespaces = nil
+ } else {
+ yyv12 := &x.Namespaces
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv12, false, d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TopologyKey = ""
+ } else {
+ x.TopologyKey = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil
+ yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PreferredDuringSchedulingIgnoredDuringExecution == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "requiredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ } else {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector)
+ }
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d)
+ }
+ case "preferredDuringSchedulingIgnoredDuringExecution":
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = nil
+ }
+ } else {
+ if x.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector)
+ }
+ x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PreferredDuringSchedulingIgnoredDuringExecution = nil
+ } else {
+ yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("weight"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Weight))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Preference
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preference"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Preference
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "weight":
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int32(r.DecodeInt(32))
+ }
+ case "preference":
+ if r.TryDecodeAsNil() {
+ x.Preference = NodeSelectorTerm{}
+ } else {
+ yyv5 := &x.Preference
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Weight = 0
+ } else {
+ x.Weight = int32(r.DecodeInt(32))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Preference = NodeSelectorTerm{}
+ } else {
+ yyv8 := &x.Preference
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Value != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Effect.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("effect"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Effect.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "effect":
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Key != ""
+ yyq2[1] = x.Operator != ""
+ yyq2[2] = x.Value != ""
+ yyq2[3] = x.Effect != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Value))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Effect.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("effect"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Effect.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = TolerationOperator(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "effect":
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = TolerationOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Effect = ""
+ } else {
+ x.Effect = TaintEffect(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [17]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Volumes) != 0
+ yyq2[2] = x.RestartPolicy != ""
+ yyq2[3] = x.TerminationGracePeriodSeconds != nil
+ yyq2[4] = x.ActiveDeadlineSeconds != nil
+ yyq2[5] = x.DNSPolicy != ""
+ yyq2[6] = len(x.NodeSelector) != 0
+ yyq2[7] = x.ServiceAccountName != ""
+ yyq2[8] = x.DeprecatedServiceAccount != ""
+ yyq2[9] = x.NodeName != ""
+ yyq2[10] = x.HostNetwork != false
+ yyq2[11] = x.HostPID != false
+ yyq2[12] = x.HostIPC != false
+ yyq2[13] = x.SecurityContext != nil
+ yyq2[14] = len(x.ImagePullSecrets) != 0
+ yyq2[15] = x.Hostname != ""
+ yyq2[16] = x.Subdomain != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(17)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceVolume(([]Volume)(x.Volumes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceVolume(([]Volume)(x.Volumes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Containers == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceContainer(([]Container)(x.Containers), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Containers == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceContainer(([]Container)(x.Containers), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.RestartPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("restartPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.RestartPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.TerminationGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy13 := *x.TerminationGracePeriodSeconds
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(yy13))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TerminationGracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.TerminationGracePeriodSeconds
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(yy15))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.ActiveDeadlineSeconds
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.ActiveDeadlineSeconds
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(yy20))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ x.DNSPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.DNSPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.NodeSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.NodeSelector, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.NodeSelector, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceAccount"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NodeName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NodeName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostNetwork"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[11] {
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[11] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIPC"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("securityContext"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SecurityContext == nil {
+ r.EncodeNil()
+ } else {
+ x.SecurityContext.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[14] {
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym50 := z.EncBinary()
+ _ = yym50
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[14] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym51 := z.EncBinary()
+ _ = yym51
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[15] {
+ yym53 := z.EncBinary()
+ _ = yym53
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[15] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostname"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym54 := z.EncBinary()
+ _ = yym54
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[16] {
+ yym56 := z.EncBinary()
+ _ = yym56
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[16] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subdomain"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym57 := z.EncBinary()
+ _ = yym57
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "volumes":
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv4 := &x.Volumes
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceVolume((*[]Volume)(yyv4), d)
+ }
+ }
+ case "containers":
+ if r.TryDecodeAsNil() {
+ x.Containers = nil
+ } else {
+ yyv6 := &x.Containers
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceContainer((*[]Container)(yyv6), d)
+ }
+ }
+ case "restartPolicy":
+ if r.TryDecodeAsNil() {
+ x.RestartPolicy = ""
+ } else {
+ x.RestartPolicy = RestartPolicy(r.DecodeString())
+ }
+ case "terminationGracePeriodSeconds":
+ if r.TryDecodeAsNil() {
+ if x.TerminationGracePeriodSeconds != nil {
+ x.TerminationGracePeriodSeconds = nil
+ }
+ } else {
+ if x.TerminationGracePeriodSeconds == nil {
+ x.TerminationGracePeriodSeconds = new(int64)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "activeDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "dnsPolicy":
+ if r.TryDecodeAsNil() {
+ x.DNSPolicy = ""
+ } else {
+ x.DNSPolicy = DNSPolicy(r.DecodeString())
+ }
+ case "nodeSelector":
+ if r.TryDecodeAsNil() {
+ x.NodeSelector = nil
+ } else {
+ yyv14 := &x.NodeSelector
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv14, false, d)
+ }
+ }
+ case "serviceAccountName":
+ if r.TryDecodeAsNil() {
+ x.ServiceAccountName = ""
+ } else {
+ x.ServiceAccountName = string(r.DecodeString())
+ }
+ case "serviceAccount":
+ if r.TryDecodeAsNil() {
+ x.DeprecatedServiceAccount = ""
+ } else {
+ x.DeprecatedServiceAccount = string(r.DecodeString())
+ }
+ case "nodeName":
+ if r.TryDecodeAsNil() {
+ x.NodeName = ""
+ } else {
+ x.NodeName = string(r.DecodeString())
+ }
+ case "hostNetwork":
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ case "hostPID":
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ case "hostIPC":
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ case "securityContext":
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(PodSecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ case "imagePullSecrets":
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv23 := &x.ImagePullSecrets
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv23), d)
+ }
+ }
+ case "hostname":
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ case "subdomain":
+ if r.TryDecodeAsNil() {
+ x.Subdomain = ""
+ } else {
+ x.Subdomain = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj27 int
+ var yyb27 bool
+ var yyhl27 bool = l >= 0
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv28 := &x.Volumes
+ yym29 := z.DecBinary()
+ _ = yym29
+ if false {
+ } else {
+ h.decSliceVolume((*[]Volume)(yyv28), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Containers = nil
+ } else {
+ yyv30 := &x.Containers
+ yym31 := z.DecBinary()
+ _ = yym31
+ if false {
+ } else {
+ h.decSliceContainer((*[]Container)(yyv30), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RestartPolicy = ""
+ } else {
+ x.RestartPolicy = RestartPolicy(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TerminationGracePeriodSeconds != nil {
+ x.TerminationGracePeriodSeconds = nil
+ }
+ } else {
+ if x.TerminationGracePeriodSeconds == nil {
+ x.TerminationGracePeriodSeconds = new(int64)
+ }
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else {
+ *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DNSPolicy = ""
+ } else {
+ x.DNSPolicy = DNSPolicy(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeSelector = nil
+ } else {
+ yyv38 := &x.NodeSelector
+ yym39 := z.DecBinary()
+ _ = yym39
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv38, false, d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceAccountName = ""
+ } else {
+ x.ServiceAccountName = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DeprecatedServiceAccount = ""
+ } else {
+ x.DeprecatedServiceAccount = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeName = ""
+ } else {
+ x.NodeName = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SecurityContext != nil {
+ x.SecurityContext = nil
+ }
+ } else {
+ if x.SecurityContext == nil {
+ x.SecurityContext = new(PodSecurityContext)
+ }
+ x.SecurityContext.CodecDecodeSelf(d)
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv47 := &x.ImagePullSecrets
+ yym48 := z.DecBinary()
+ _ = yym48
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv47), d)
+ }
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subdomain = ""
+ } else {
+ x.Subdomain = string(r.DecodeString())
+ }
+ for {
+ yyj27++
+ if yyhl27 {
+ yyb27 = yyj27 > l
+ } else {
+ yyb27 = r.CheckBreak()
+ }
+ if yyb27 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj27-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.SELinuxOptions != nil
+ yyq2[1] = x.RunAsUser != nil
+ yyq2[2] = x.RunAsNonRoot != nil
+ yyq2[3] = len(x.SupplementalGroups) != 0
+ yyq2[4] = x.FSGroup != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.RunAsUser
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(yy7))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsUser"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.RunAsUser
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy12 := *x.RunAsNonRoot
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(yy12))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.RunAsNonRoot
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeBool(bool(yy14))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.SupplementalGroups == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.EncSliceInt64V(x.SupplementalGroups, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SupplementalGroups == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ z.F.EncSliceInt64V(x.SupplementalGroups, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.FSGroup == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.FSGroup
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(yy20))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsGroup"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FSGroup == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.FSGroup
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeInt(int64(yy22))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "seLinuxOptions":
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ case "runAsUser":
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "runAsNonRoot":
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ case "supplementalGroups":
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = nil
+ } else {
+ yyv9 := &x.SupplementalGroups
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecSliceInt64X(yyv9, false, d)
+ }
+ }
+ case "fsGroup":
+ if r.TryDecodeAsNil() {
+ if x.FSGroup != nil {
+ x.FSGroup = nil
+ }
+ } else {
+ if x.FSGroup == nil {
+ x.FSGroup = new(int64)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = nil
+ } else {
+ yyv19 := &x.SupplementalGroups
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.DecSliceInt64X(yyv19, false, d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FSGroup != nil {
+ x.FSGroup = nil
+ }
+ } else {
+ if x.FSGroup == nil {
+ x.FSGroup = new(int64)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64))
+ }
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ yyq2[1] = len(x.Conditions) != 0
+ yyq2[2] = x.Message != ""
+ yyq2[3] = x.Reason != ""
+ yyq2[4] = x.HostIP != ""
+ yyq2[5] = x.PodIP != ""
+ yyq2[6] = x.StartTime != nil
+ yyq2[7] = len(x.ContainerStatuses) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePodCondition(([]PodCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePodCondition(([]PodCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym22 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym22 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym23 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym23 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.ContainerStatuses == nil {
+ r.EncodeNil()
+ } else {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerStatuses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ContainerStatuses == nil {
+ r.EncodeNil()
+ } else {
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PodPhase(r.DecodeString())
+ }
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv5 := &x.Conditions
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePodCondition((*[]PodCondition)(yyv5), d)
+ }
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "hostIP":
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ case "podIP":
+ if r.TryDecodeAsNil() {
+ x.PodIP = ""
+ } else {
+ x.PodIP = string(r.DecodeString())
+ }
+ case "startTime":
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg2_unversioned.Time)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ case "containerStatuses":
+ if r.TryDecodeAsNil() {
+ x.ContainerStatuses = nil
+ } else {
+ yyv13 := &x.ContainerStatuses
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceContainerStatus((*[]ContainerStatus)(yyv13), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj15 int
+ var yyb15 bool
+ var yyhl15 bool = l >= 0
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = PodPhase(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv17 := &x.Conditions
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decSlicePodCondition((*[]PodCondition)(yyv17), d)
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIP = ""
+ } else {
+ x.HostIP = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodIP = ""
+ } else {
+ x.PodIP = string(r.DecodeString())
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg2_unversioned.Time)
+ }
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym24 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym24 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerStatuses = nil
+ } else {
+ yyv25 := &x.ContainerStatuses
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSliceContainerStatus((*[]ContainerStatus)(yyv25), d)
+ }
+ }
+ for {
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj15-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PodStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePod(([]Pod)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePod(([]Pod)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePod((*[]Pod)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePod((*[]Pod)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv7 := &x.ObjectMeta
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSpec{}
+ } else {
+ yyv8 := &x.Spec
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Template
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Template
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = PodTemplateSpec{}
+ } else {
+ yyv5 := &x.Template
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = PodTemplateSpec{}
+ } else {
+ yyv10 := &x.Template
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePodTemplate(([]PodTemplate)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePodTemplate(([]PodTemplate)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePodTemplate((*[]PodTemplate)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePodTemplate((*[]PodTemplate)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
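+// CodecEncodeSelf writes ReplicationControllerSpec as a map keyed by its JSON
+// field names, skipping replicas, selector and template when unset, or as a
+// fixed three-element array when the handle's StructToArray option is set.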
+func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != nil
+ yyq2[1] = len(x.Selector) != 0
+ yyq2[2] = x.Template != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Replicas
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Replicas
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Template == nil {
+ r.EncodeNil()
+ } else {
+ x.Template.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Template == nil {
+ r.EncodeNil()
+ } else {
+ x.Template.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv6 := &x.Selector
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv6, false, d)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ if x.Template != nil {
+ x.Template = nil
+ }
+ } else {
+ if x.Template == nil {
+ x.Template = new(PodTemplateSpec)
+ }
+ x.Template.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv12 := &x.Selector
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv12, false, d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Template != nil {
+ x.Template = nil
+ }
+ } else {
+ if x.Template == nil {
+ x.Template = new(PodTemplateSpec)
+ }
+ x.Template.CodecDecodeSelf(d)
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
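+// CodecEncodeSelf writes ReplicationControllerStatus with replicas always
+// present; fullyLabeledReplicas and observedGeneration are emitted only when
+// non-zero in the map form (the array form always carries all three slots).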
+func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FullyLabeledReplicas != 0
+ yyq2[2] = x.ObservedGeneration != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "fullyLabeledReplicas":
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
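+// CodecEncodeSelf writes ReplicationController with metadata, spec and status
+// always present; kind and apiVersion are emitted only when non-empty.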
+func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicationControllerSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicationControllerStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicationControllerSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicationControllerStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
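+// CodecEncodeSelf writes ReplicationControllerList with metadata and items
+// always present; kind and apiVersion are emitted only when non-empty.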
+func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceReplicationController(([]ReplicationController)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceReplicationController(([]ReplicationController)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceReplicationController((*[]ReplicationController)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceReplicationController((*[]ReplicationController)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
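+// ServiceAffinity is a string alias, so it encodes and decodes as a plain
+// UTF-8 string unless a registered codec extension claims the type.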
+func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
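+// ServiceType is likewise a string alias with plain string encode/decode.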
+func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
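+// CodecEncodeSelf writes ServiceStatus with its single loadBalancer field,
+// which is always emitted.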
+func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.LoadBalancer
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancer"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.LoadBalancer
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "loadBalancer":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = LoadBalancerStatus{}
+ } else {
+ yyv4 := &x.LoadBalancer
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = LoadBalancerStatus{}
+ } else {
+ yyv6 := &x.LoadBalancer
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
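+// CodecEncodeSelf writes LoadBalancerStatus; the ingress slice is omitted
+// from the map form when empty.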
+func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Ingress) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ingress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ingress":
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv4 := &x.Ingress
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv7 := &x.Ingress
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
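+// CodecEncodeSelf writes LoadBalancerIngress; ip and hostname are each
+// omitted from the map form when empty.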
+func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.IP != ""
+ yyq2[1] = x.Hostname != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ip"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostname"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ip":
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ case "hostname":
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
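+// CodecEncodeSelf writes ServiceSpec with ports always present; the remaining
+// fields (selector, clusterIP, type, externalIPs, deprecatedPublicIPs,
+// sessionAffinity, loadBalancerIP, loadBalancerSourceRanges) are emitted only
+// when set. The array form always carries all nine slots.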
+func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [9]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.Selector) != 0
+ yyq2[2] = x.ClusterIP != ""
+ yyq2[3] = x.Type != ""
+ yyq2[4] = len(x.ExternalIPs) != 0
+ yyq2[5] = len(x.DeprecatedPublicIPs) != 0
+ yyq2[6] = x.SessionAffinity != ""
+ yyq2[7] = x.LoadBalancerIP != ""
+ yyq2[8] = len(x.LoadBalancerSourceRanges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(9)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceServicePort(([]ServicePort)(x.Ports), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceServicePort(([]ServicePort)(x.Ports), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ExternalIPs == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.ExternalIPs, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("externalIPs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ExternalIPs == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.ExternalIPs, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.DeprecatedPublicIPs == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deprecatedPublicIPs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DeprecatedPublicIPs == nil {
+ r.EncodeNil()
+ } else {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ x.SessionAffinity.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.SessionAffinity.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.LoadBalancerSourceRanges == nil {
+ r.EncodeNil()
+ } else {
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LoadBalancerSourceRanges == nil {
+ r.EncodeNil()
+ } else {
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv4 := &x.Ports
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceServicePort((*[]ServicePort)(yyv4), d)
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv6 := &x.Selector
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv6, false, d)
+ }
+ }
+ case "clusterIP":
+ if r.TryDecodeAsNil() {
+ x.ClusterIP = ""
+ } else {
+ x.ClusterIP = string(r.DecodeString())
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ServiceType(r.DecodeString())
+ }
+ case "externalIPs":
+ if r.TryDecodeAsNil() {
+ x.ExternalIPs = nil
+ } else {
+ yyv10 := &x.ExternalIPs
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv10, false, d)
+ }
+ }
+ case "deprecatedPublicIPs":
+ if r.TryDecodeAsNil() {
+ x.DeprecatedPublicIPs = nil
+ } else {
+ yyv12 := &x.DeprecatedPublicIPs
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv12, false, d)
+ }
+ }
+ case "sessionAffinity":
+ if r.TryDecodeAsNil() {
+ x.SessionAffinity = ""
+ } else {
+ x.SessionAffinity = ServiceAffinity(r.DecodeString())
+ }
+ case "loadBalancerIP":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerIP = ""
+ } else {
+ x.LoadBalancerIP = string(r.DecodeString())
+ }
+ case "loadBalancerSourceRanges":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerSourceRanges = nil
+ } else {
+ yyv16 := &x.LoadBalancerSourceRanges
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv16, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj18 int
+ var yyb18 bool
+ var yyhl18 bool = l >= 0
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv19 := &x.Ports
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ h.decSliceServicePort((*[]ServicePort)(yyv19), d)
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv21 := &x.Selector
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv21, false, d)
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterIP = ""
+ } else {
+ x.ClusterIP = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ServiceType(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExternalIPs = nil
+ } else {
+ yyv25 := &x.ExternalIPs
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv25, false, d)
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DeprecatedPublicIPs = nil
+ } else {
+ yyv27 := &x.DeprecatedPublicIPs
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv27, false, d)
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SessionAffinity = ""
+ } else {
+ x.SessionAffinity = ServiceAffinity(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerIP = ""
+ } else {
+ x.LoadBalancerIP = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancerSourceRanges = nil
+ } else {
+ yyv31 := &x.LoadBalancerSourceRanges
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv31, false, d)
+ }
+ }
+ for {
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj18-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ yyq2[1] = x.Protocol != ""
+ yyq2[3] = true
+ yyq2[4] = x.NodePort != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ x.Protocol.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Protocol.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy13 := &x.TargetPort
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy13) {
+ } else if !yym14 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy13)
+ } else {
+ z.EncFallback(yy13)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy15 := &x.TargetPort
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NodePort))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodePort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NodePort))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
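+// Encoding note (illustrative sketch, not part of the codecgen output): each
+// generated CodecEncodeSelf above builds a yyq2 presence bitmap so that
+// zero-valued optional fields are omitted, and consults the handle's
+// StructToArray option (read via z.EncBasicHandle().StructToArray) to emit
+// either a keyed map or a positional array. Assuming the vendored
+// github.com/ugorji/go/codec package is imported as codec, a caller drives
+// these methods indirectly through the encoder:
+//
+//	var buf []byte
+//	enc := codec.NewEncoderBytes(&buf, &codec.JsonHandle{})
+//	err := enc.Encode(&ServicePort{Name: "http", Port: 80}) // dispatches to CodecEncodeSelf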
+func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
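+// Decoding note (illustrative sketch, not part of the codecgen output): the
+// CodecDecodeSelf above inspects r.ContainerType() and dispatches to the
+// codecDecodeSelfFromMap or codecDecodeSelfFromArray helpers that follow;
+// unrecognised map keys and surplus array elements are routed to
+// DecStructFieldNotFound. A minimal round-trip, assuming the same vendored
+// codec import as in the encoding sketch:
+//
+//	var sp ServicePort
+//	dec := codec.NewDecoderBytes(buf, &codec.JsonHandle{})
+//	err := dec.Decode(&sp) // dispatches to sp.CodecDecodeSelf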
+func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ case "targetPort":
+ if r.TryDecodeAsNil() {
+ x.TargetPort = pkg4_intstr.IntOrString{}
+ } else {
+ yyv7 := &x.TargetPort
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ case "nodePort":
+ if r.TryDecodeAsNil() {
+ x.NodePort = 0
+ } else {
+ x.NodePort = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetPort = pkg4_intstr.IntOrString{}
+ } else {
+ yyv14 := &x.TargetPort
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv14) {
+ } else if !yym15 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv14)
+ } else {
+ z.DecFallback(yyv14, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodePort = 0
+ } else {
+ x.NodePort = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ServiceSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ServiceStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ServiceSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ServiceStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
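+// The remaining types in this file (ServiceList, ServiceAccount,
+// ServiceAccountList, Endpoints, EndpointSubset, EndpointAddress, ...) repeat
+// the same generated Selfer pattern: CodecEncodeSelf/CodecDecodeSelf plus
+// per-container codecDecodeSelfFromMap/FromArray helpers, with embedded
+// structs delegating to their own Selfer methods and slice fields handled by
+// the generated encSlice*/decSlice* helpers (e.g. encSliceService,
+// decSliceService below).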
+func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceService(([]Service)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceService(([]Service)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceService((*[]Service)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceService((*[]Service)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Secrets) != 0
+ yyq2[2] = len(x.ImagePullSecrets) != 0
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Secrets == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secrets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Secrets == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ImagePullSecrets == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "secrets":
+ if r.TryDecodeAsNil() {
+ x.Secrets = nil
+ } else {
+ yyv5 := &x.Secrets
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceObjectReference((*[]ObjectReference)(yyv5), d)
+ }
+ }
+ case "imagePullSecrets":
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv7 := &x.ImagePullSecrets
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv7), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv12 := &x.ObjectMeta
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Secrets = nil
+ } else {
+ yyv13 := &x.Secrets
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceObjectReference((*[]ObjectReference)(yyv13), d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImagePullSecrets = nil
+ } else {
+ yyv15 := &x.ImagePullSecrets
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv15), d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceServiceAccount((*[]ServiceAccount)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceServiceAccount((*[]ServiceAccount)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Subsets == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subsets"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Subsets == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "subsets":
+ if r.TryDecodeAsNil() {
+ x.Subsets = nil
+ } else {
+ yyv5 := &x.Subsets
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceEndpointSubset((*[]EndpointSubset)(yyv5), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subsets = nil
+ } else {
+ yyv11 := &x.Subsets
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceEndpointSubset((*[]EndpointSubset)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Addresses) != 0
+ yyq2[1] = len(x.NotReadyAddresses) != 0
+ yyq2[2] = len(x.Ports) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("addresses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.NotReadyAddresses == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NotReadyAddresses == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "addresses":
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv4 := &x.Addresses
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d)
+ }
+ }
+ case "notReadyAddresses":
+ if r.TryDecodeAsNil() {
+ x.NotReadyAddresses = nil
+ } else {
+ yyv6 := &x.NotReadyAddresses
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d)
+ }
+ }
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv8 := &x.Ports
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv11 := &x.Addresses
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NotReadyAddresses = nil
+ } else {
+ yyv13 := &x.NotReadyAddresses
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv15 := &x.Ports
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Hostname != ""
+ yyq2[2] = x.TargetRef != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ip"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostname"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Hostname))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.TargetRef == nil {
+ r.EncodeNil()
+ } else {
+ x.TargetRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TargetRef == nil {
+ r.EncodeNil()
+ } else {
+ x.TargetRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
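+// CodecDecodeSelf dispatches on the incoming container type and delegates to
+// the map- or array-based decoder below; any other container type panics.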
+func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
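+// codecDecodeSelfFromMap decodes EndpointAddress from a map, matching the keys
+// ip, hostname and targetRef; unrecognised keys are passed to
+// DecStructFieldNotFound.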
+func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ip":
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ case "hostname":
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ case "targetRef":
+ if r.TryDecodeAsNil() {
+ if x.TargetRef != nil {
+ x.TargetRef = nil
+ }
+ } else {
+ if x.TargetRef == nil {
+ x.TargetRef = new(ObjectReference)
+ }
+ x.TargetRef.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
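+// codecDecodeSelfFromArray decodes EndpointAddress positionally (ip, hostname,
+// targetRef), allocating TargetRef on demand and skipping trailing elements.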
+func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IP = ""
+ } else {
+ x.IP = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hostname = ""
+ } else {
+ x.Hostname = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TargetRef != nil {
+ x.TargetRef = nil
+ }
+ } else {
+ if x.TargetRef == nil {
+ x.TargetRef = new(ObjectReference)
+ }
+ x.TargetRef.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
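+// CodecEncodeSelf encodes EndpointPort: port is always written, while name and
+// protocol are omitted from the map form when empty.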
+func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ yyq2[2] = x.Protocol != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.Protocol.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Protocol.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Protocol = ""
+ } else {
+ x.Protocol = Protocol(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
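+// CodecEncodeSelf encodes EndpointsList: metadata and items are always written
+// in the map form, while kind and apiVersion are omitted when empty.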
+func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceEndpoints(([]Endpoints)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEndpoints(([]Endpoints)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceEndpoints((*[]Endpoints)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEndpoints((*[]Endpoints)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
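+// CodecEncodeSelf encodes NodeSpec; podCIDR, externalID, providerID and
+// unschedulable are all optional and dropped from the map form at their zero
+// values.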
+func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.PodCIDR != ""
+ yyq2[1] = x.ExternalID != ""
+ yyq2[2] = x.ProviderID != ""
+ yyq2[3] = x.Unschedulable != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("externalID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("providerID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Unschedulable))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("unschedulable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Unschedulable))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "podCIDR":
+ if r.TryDecodeAsNil() {
+ x.PodCIDR = ""
+ } else {
+ x.PodCIDR = string(r.DecodeString())
+ }
+ case "externalID":
+ if r.TryDecodeAsNil() {
+ x.ExternalID = ""
+ } else {
+ x.ExternalID = string(r.DecodeString())
+ }
+ case "providerID":
+ if r.TryDecodeAsNil() {
+ x.ProviderID = ""
+ } else {
+ x.ProviderID = string(r.DecodeString())
+ }
+ case "unschedulable":
+ if r.TryDecodeAsNil() {
+ x.Unschedulable = false
+ } else {
+ x.Unschedulable = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodCIDR = ""
+ } else {
+ x.PodCIDR = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExternalID = ""
+ } else {
+ x.ExternalID = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ProviderID = ""
+ } else {
+ x.ProviderID = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Unschedulable = false
+ } else {
+ x.Unschedulable = bool(r.DecodeBool())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
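+// CodecEncodeSelf encodes DaemonEndpoint, whose single Port field is always
+// written; note the capitalised "Port" map key, mirrored by the decoder below.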
+func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
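+// CodecEncodeSelf encodes NodeDaemonEndpoints; kubeletEndpoint is always
+// treated as present and encoded as a nested DaemonEndpoint.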
+func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.KubeletEndpoint
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.KubeletEndpoint
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kubeletEndpoint":
+ if r.TryDecodeAsNil() {
+ x.KubeletEndpoint = DaemonEndpoint{}
+ } else {
+ yyv4 := &x.KubeletEndpoint
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeletEndpoint = DaemonEndpoint{}
+ } else {
+ yyv6 := &x.KubeletEndpoint
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
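+// CodecEncodeSelf encodes NodeSystemInfo; all ten string fields are mandatory,
+// so the map form always carries ten entries.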
+func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [10]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(10)
+ } else {
+ yynn2 = 10
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MachineID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("machineID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MachineID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("systemUUID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.BootID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("bootID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.BootID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kernelVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OSImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("osImage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OSImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operatingSystem"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Architecture))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("architecture"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Architecture))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "machineID":
+ if r.TryDecodeAsNil() {
+ x.MachineID = ""
+ } else {
+ x.MachineID = string(r.DecodeString())
+ }
+ case "systemUUID":
+ if r.TryDecodeAsNil() {
+ x.SystemUUID = ""
+ } else {
+ x.SystemUUID = string(r.DecodeString())
+ }
+ case "bootID":
+ if r.TryDecodeAsNil() {
+ x.BootID = ""
+ } else {
+ x.BootID = string(r.DecodeString())
+ }
+ case "kernelVersion":
+ if r.TryDecodeAsNil() {
+ x.KernelVersion = ""
+ } else {
+ x.KernelVersion = string(r.DecodeString())
+ }
+ case "osImage":
+ if r.TryDecodeAsNil() {
+ x.OSImage = ""
+ } else {
+ x.OSImage = string(r.DecodeString())
+ }
+ case "containerRuntimeVersion":
+ if r.TryDecodeAsNil() {
+ x.ContainerRuntimeVersion = ""
+ } else {
+ x.ContainerRuntimeVersion = string(r.DecodeString())
+ }
+ case "kubeletVersion":
+ if r.TryDecodeAsNil() {
+ x.KubeletVersion = ""
+ } else {
+ x.KubeletVersion = string(r.DecodeString())
+ }
+ case "kubeProxyVersion":
+ if r.TryDecodeAsNil() {
+ x.KubeProxyVersion = ""
+ } else {
+ x.KubeProxyVersion = string(r.DecodeString())
+ }
+ case "operatingSystem":
+ if r.TryDecodeAsNil() {
+ x.OperatingSystem = ""
+ } else {
+ x.OperatingSystem = string(r.DecodeString())
+ }
+ case "architecture":
+ if r.TryDecodeAsNil() {
+ x.Architecture = ""
+ } else {
+ x.Architecture = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MachineID = ""
+ } else {
+ x.MachineID = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SystemUUID = ""
+ } else {
+ x.SystemUUID = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.BootID = ""
+ } else {
+ x.BootID = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KernelVersion = ""
+ } else {
+ x.KernelVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OSImage = ""
+ } else {
+ x.OSImage = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerRuntimeVersion = ""
+ } else {
+ x.ContainerRuntimeVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeletVersion = ""
+ } else {
+ x.KubeletVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeProxyVersion = ""
+ } else {
+ x.KubeProxyVersion = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OperatingSystem = ""
+ } else {
+ x.OperatingSystem = string(r.DecodeString())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Architecture = ""
+ } else {
+ x.Architecture = string(r.DecodeString())
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
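+// CodecEncodeSelf encodes NodeStatus; daemonEndpoints and nodeInfo are always
+// written in the map form, while capacity, allocatable, phase, conditions,
+// addresses, images, volumesInUse and volumesAttached are omitted when empty.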
+func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [10]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Capacity) != 0
+ yyq2[1] = len(x.Allocatable) != 0
+ yyq2[2] = x.Phase != ""
+ yyq2[3] = len(x.Conditions) != 0
+ yyq2[4] = len(x.Addresses) != 0
+ yyq2[5] = true
+ yyq2[6] = true
+ yyq2[7] = len(x.Images) != 0
+ yyq2[8] = len(x.VolumesInUse) != 0
+ yyq2[9] = len(x.VolumesAttached) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(10)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capacity"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capacity == nil {
+ r.EncodeNil()
+ } else {
+ x.Capacity.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Allocatable == nil {
+ r.EncodeNil()
+ } else {
+ x.Allocatable.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allocatable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Allocatable == nil {
+ r.EncodeNil()
+ } else {
+ x.Allocatable.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("addresses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Addresses == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yy19 := &x.DaemonEndpoints
+ yy19.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy21 := &x.DaemonEndpoints
+ yy21.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yy24 := &x.NodeInfo
+ yy24.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeInfo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy26 := &x.NodeInfo
+ yy26.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.Images == nil {
+ r.EncodeNil()
+ } else {
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("images"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Images == nil {
+ r.EncodeNil()
+ } else {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ if x.VolumesInUse == nil {
+ r.EncodeNil()
+ } else {
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumesInUse"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumesInUse == nil {
+ r.EncodeNil()
+ } else {
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ if x.VolumesAttached == nil {
+ r.EncodeNil()
+ } else {
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumesAttached"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumesAttached == nil {
+ r.EncodeNil()
+ } else {
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capacity":
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv4 := &x.Capacity
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "allocatable":
+ if r.TryDecodeAsNil() {
+ x.Allocatable = nil
+ } else {
+ yyv5 := &x.Allocatable
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NodePhase(r.DecodeString())
+ }
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv7 := &x.Conditions
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d)
+ }
+ }
+ case "addresses":
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv9 := &x.Addresses
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d)
+ }
+ }
+ case "daemonEndpoints":
+ if r.TryDecodeAsNil() {
+ x.DaemonEndpoints = NodeDaemonEndpoints{}
+ } else {
+ yyv11 := &x.DaemonEndpoints
+ yyv11.CodecDecodeSelf(d)
+ }
+ case "nodeInfo":
+ if r.TryDecodeAsNil() {
+ x.NodeInfo = NodeSystemInfo{}
+ } else {
+ yyv12 := &x.NodeInfo
+ yyv12.CodecDecodeSelf(d)
+ }
+ case "images":
+ if r.TryDecodeAsNil() {
+ x.Images = nil
+ } else {
+ yyv13 := &x.Images
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceContainerImage((*[]ContainerImage)(yyv13), d)
+ }
+ }
+ case "volumesInUse":
+ if r.TryDecodeAsNil() {
+ x.VolumesInUse = nil
+ } else {
+ yyv15 := &x.VolumesInUse
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d)
+ }
+ }
+ case "volumesAttached":
+ if r.TryDecodeAsNil() {
+ x.VolumesAttached = nil
+ } else {
+ yyv17 := &x.VolumesAttached
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj19 int
+ var yyb19 bool
+ var yyhl19 bool = l >= 0
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Capacity = nil
+ } else {
+ yyv20 := &x.Capacity
+ yyv20.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Allocatable = nil
+ } else {
+ yyv21 := &x.Allocatable
+ yyv21.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NodePhase(r.DecodeString())
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv23 := &x.Conditions
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Addresses = nil
+ } else {
+ yyv25 := &x.Addresses
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DaemonEndpoints = NodeDaemonEndpoints{}
+ } else {
+ yyv27 := &x.DaemonEndpoints
+ yyv27.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeInfo = NodeSystemInfo{}
+ } else {
+ yyv28 := &x.NodeInfo
+ yyv28.CodecDecodeSelf(d)
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Images = nil
+ } else {
+ yyv29 := &x.Images
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.decSliceContainerImage((*[]ContainerImage)(yyv29), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumesInUse = nil
+ } else {
+ yyv31 := &x.VolumesInUse
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d)
+ }
+ }
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumesAttached = nil
+ } else {
+ yyv33 := &x.VolumesAttached
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else {
+ h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d)
+ }
+ }
+ for {
+ yyj19++
+ if yyhl19 {
+ yyb19 = yyj19 > l
+ } else {
+ yyb19 = r.CheckBreak()
+ }
+ if yyb19 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj19-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Name.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Name.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("devicePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = UniqueVolumeName(r.DecodeString())
+ }
+ case "devicePath":
+ if r.TryDecodeAsNil() {
+ x.DevicePath = ""
+ } else {
+ x.DevicePath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = UniqueVolumeName(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DevicePath = ""
+ } else {
+ x.DevicePath = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.SizeBytes != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Names == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Names, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("names"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Names == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Names, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SizeBytes))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("sizeBytes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SizeBytes))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "names":
+ if r.TryDecodeAsNil() {
+ x.Names = nil
+ } else {
+ yyv4 := &x.Names
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "sizeBytes":
+ if r.TryDecodeAsNil() {
+ x.SizeBytes = 0
+ } else {
+ x.SizeBytes = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Names = nil
+ } else {
+ yyv8 := &x.Names
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SizeBytes = 0
+ } else {
+ x.SizeBytes = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Status.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Status.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastHeartbeatTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastHeartbeatTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ case "lastHeartbeatTime":
+ if r.TryDecodeAsNil() {
+ x.LastHeartbeatTime = pkg2_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastHeartbeatTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastHeartbeatTime = pkg2_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastHeartbeatTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg2_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("address"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeAddressType(r.DecodeString())
+ }
+ case "address":
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = NodeAddressType(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ h.encResourceList((ResourceList)(x), e)
+ }
+ }
+}
+
+func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ h.decResourceList((*ResourceList)(x), d)
+ }
+}
+
+func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = NodeSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = NodeStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = NodeSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = NodeStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNode(([]Node)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNode(([]Node)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNode((*[]Node)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceNode((*[]Node)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Finalizers) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("finalizers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Finalizers == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "finalizers":
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv4 := &x.Finalizers
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Finalizers = nil
+ } else {
+ yyv7 := &x.Finalizers
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Phase != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Phase.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("phase"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Phase.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "phase":
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NamespacePhase(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Phase = ""
+ } else {
+ x.Phase = NamespacePhase(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = NamespaceSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = NamespaceStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = NamespaceSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = NamespaceStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNamespace(([]Namespace)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNamespace(([]Namespace)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNamespace((*[]Namespace)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceNamespace((*[]Namespace)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.Target
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("target"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Target
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "target":
+ if r.TryDecodeAsNil() {
+ x.Target = ObjectReference{}
+ } else {
+ yyv5 := &x.Target
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Target = ObjectReference{}
+ } else {
+ yyv10 := &x.Target
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.UID != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.UID == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.UID
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.UID == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.UID
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "uid":
+ if r.TryDecodeAsNil() {
+ if x.UID != nil {
+ x.UID = nil
+ }
+ } else {
+ if x.UID == nil {
+ x.UID = new(pkg1_types.UID)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.UID) {
+ } else {
+ *((*string)(x.UID)) = r.DecodeString()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.UID != nil {
+ x.UID = nil
+ }
+ } else {
+ if x.UID == nil {
+ x.UID = new(pkg1_types.UID)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.UID) {
+ } else {
+ *((*string)(x.UID)) = r.DecodeString()
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.GracePeriodSeconds != nil
+ yyq2[1] = x.Preconditions != nil
+ yyq2[2] = x.OrphanDependents != nil
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.GracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.GracePeriodSeconds
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.GracePeriodSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.GracePeriodSeconds
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Preconditions == nil {
+ r.EncodeNil()
+ } else {
+ x.Preconditions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("preconditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Preconditions == nil {
+ r.EncodeNil()
+ } else {
+ x.Preconditions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.OrphanDependents == nil {
+ r.EncodeNil()
+ } else {
+ yy12 := *x.OrphanDependents
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(yy12))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("orphanDependents"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.OrphanDependents == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.OrphanDependents
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeBool(bool(yy14))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "gracePeriodSeconds":
+ if r.TryDecodeAsNil() {
+ if x.GracePeriodSeconds != nil {
+ x.GracePeriodSeconds = nil
+ }
+ } else {
+ if x.GracePeriodSeconds == nil {
+ x.GracePeriodSeconds = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "preconditions":
+ if r.TryDecodeAsNil() {
+ if x.Preconditions != nil {
+ x.Preconditions = nil
+ }
+ } else {
+ if x.Preconditions == nil {
+ x.Preconditions = new(Preconditions)
+ }
+ x.Preconditions.CodecDecodeSelf(d)
+ }
+ case "orphanDependents":
+ if r.TryDecodeAsNil() {
+ if x.OrphanDependents != nil {
+ x.OrphanDependents = nil
+ }
+ } else {
+ if x.OrphanDependents == nil {
+ x.OrphanDependents = new(bool)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ *((*bool)(x.OrphanDependents)) = r.DecodeBool()
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.GracePeriodSeconds != nil {
+ x.GracePeriodSeconds = nil
+ }
+ } else {
+ if x.GracePeriodSeconds == nil {
+ x.GracePeriodSeconds = new(int64)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Preconditions != nil {
+ x.Preconditions = nil
+ }
+ } else {
+ if x.Preconditions == nil {
+ x.Preconditions = new(Preconditions)
+ }
+ x.Preconditions.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.OrphanDependents != nil {
+ x.OrphanDependents = nil
+ }
+ } else {
+ if x.OrphanDependents == nil {
+ x.OrphanDependents = new(bool)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*bool)(x.OrphanDependents)) = r.DecodeBool()
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Export))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("export"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Export))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Exact))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exact"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Exact))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "export":
+ if r.TryDecodeAsNil() {
+ x.Export = false
+ } else {
+ x.Export = bool(r.DecodeBool())
+ }
+ case "exact":
+ if r.TryDecodeAsNil() {
+ x.Exact = false
+ } else {
+ x.Exact = bool(r.DecodeBool())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Export = false
+ } else {
+ x.Export = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Exact = false
+ } else {
+ x.Exact = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.LabelSelector != ""
+ yyq2[1] = x.FieldSelector != ""
+ yyq2[2] = x.Watch != false
+ yyq2[3] = x.ResourceVersion != ""
+ yyq2[4] = x.TimeoutSeconds != nil
+ yyq2[5] = x.Kind != ""
+ yyq2[6] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("labelSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Watch))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("watch"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Watch))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.TimeoutSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.TimeoutSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TimeoutSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.TimeoutSeconds
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "labelSelector":
+ if r.TryDecodeAsNil() {
+ x.LabelSelector = ""
+ } else {
+ x.LabelSelector = string(r.DecodeString())
+ }
+ case "fieldSelector":
+ if r.TryDecodeAsNil() {
+ x.FieldSelector = ""
+ } else {
+ x.FieldSelector = string(r.DecodeString())
+ }
+ case "watch":
+ if r.TryDecodeAsNil() {
+ x.Watch = false
+ } else {
+ x.Watch = bool(r.DecodeBool())
+ }
+ case "resourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "timeoutSeconds":
+ if r.TryDecodeAsNil() {
+ if x.TimeoutSeconds != nil {
+ x.TimeoutSeconds = nil
+ }
+ } else {
+ if x.TimeoutSeconds == nil {
+ x.TimeoutSeconds = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LabelSelector = ""
+ } else {
+ x.LabelSelector = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldSelector = ""
+ } else {
+ x.FieldSelector = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Watch = false
+ } else {
+ x.Watch = bool(r.DecodeBool())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TimeoutSeconds != nil {
+ x.TimeoutSeconds = nil
+ }
+ } else {
+ if x.TimeoutSeconds == nil {
+ x.TimeoutSeconds = new(int64)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [10]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Container != ""
+ yyq2[1] = x.Follow != false
+ yyq2[2] = x.Previous != false
+ yyq2[3] = x.SinceSeconds != nil
+ yyq2[4] = x.SinceTime != nil
+ yyq2[5] = x.Timestamps != false
+ yyq2[6] = x.TailLines != nil
+ yyq2[7] = x.LimitBytes != nil
+ yyq2[8] = x.Kind != ""
+ yyq2[9] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(10)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("container"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Follow))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("follow"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Follow))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Previous))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("previous"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Previous))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.SinceSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy13 := *x.SinceSeconds
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(yy13))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SinceSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.SinceSeconds
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(yy15))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.SinceTime == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.SinceTime) {
+ } else if yym18 {
+ z.EncBinaryMarshal(x.SinceTime)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.SinceTime)
+ } else {
+ z.EncFallback(x.SinceTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("sinceTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SinceTime == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.SinceTime) {
+ } else if yym19 {
+ z.EncBinaryMarshal(x.SinceTime)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.SinceTime)
+ } else {
+ z.EncFallback(x.SinceTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Timestamps))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("timestamps"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Timestamps))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.TailLines == nil {
+ r.EncodeNil()
+ } else {
+ yy24 := *x.TailLines
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeInt(int64(yy24))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tailLines"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TailLines == nil {
+ r.EncodeNil()
+ } else {
+ yy26 := *x.TailLines
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else {
+ r.EncodeInt(int64(yy26))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.LimitBytes == nil {
+ r.EncodeNil()
+ } else {
+ yy29 := *x.LimitBytes
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeInt(int64(yy29))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("limitBytes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LimitBytes == nil {
+ r.EncodeNil()
+ } else {
+ yy31 := *x.LimitBytes
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeInt(int64(yy31))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ yym37 := z.EncBinary()
+ _ = yym37
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "container":
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ case "follow":
+ if r.TryDecodeAsNil() {
+ x.Follow = false
+ } else {
+ x.Follow = bool(r.DecodeBool())
+ }
+ case "previous":
+ if r.TryDecodeAsNil() {
+ x.Previous = false
+ } else {
+ x.Previous = bool(r.DecodeBool())
+ }
+ case "sinceSeconds":
+ if r.TryDecodeAsNil() {
+ if x.SinceSeconds != nil {
+ x.SinceSeconds = nil
+ }
+ } else {
+ if x.SinceSeconds == nil {
+ x.SinceSeconds = new(int64)
+ }
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "sinceTime":
+ if r.TryDecodeAsNil() {
+ if x.SinceTime != nil {
+ x.SinceTime = nil
+ }
+ } else {
+ if x.SinceTime == nil {
+ x.SinceTime = new(pkg2_unversioned.Time)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.SinceTime) {
+ } else if yym10 {
+ z.DecBinaryUnmarshal(x.SinceTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.SinceTime)
+ } else {
+ z.DecFallback(x.SinceTime, false)
+ }
+ }
+ case "timestamps":
+ if r.TryDecodeAsNil() {
+ x.Timestamps = false
+ } else {
+ x.Timestamps = bool(r.DecodeBool())
+ }
+ case "tailLines":
+ if r.TryDecodeAsNil() {
+ if x.TailLines != nil {
+ x.TailLines = nil
+ }
+ } else {
+ if x.TailLines == nil {
+ x.TailLines = new(int64)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*int64)(x.TailLines)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "limitBytes":
+ if r.TryDecodeAsNil() {
+ if x.LimitBytes != nil {
+ x.LimitBytes = nil
+ }
+ } else {
+ if x.LimitBytes == nil {
+ x.LimitBytes = new(int64)
+ }
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj18 int
+ var yyb18 bool
+ var yyhl18 bool = l >= 0
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Follow = false
+ } else {
+ x.Follow = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Previous = false
+ } else {
+ x.Previous = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SinceSeconds != nil {
+ x.SinceSeconds = nil
+ }
+ } else {
+ if x.SinceSeconds == nil {
+ x.SinceSeconds = new(int64)
+ }
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else {
+ *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SinceTime != nil {
+ x.SinceTime = nil
+ }
+ } else {
+ if x.SinceTime == nil {
+ x.SinceTime = new(pkg2_unversioned.Time)
+ }
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.SinceTime) {
+ } else if yym25 {
+ z.DecBinaryUnmarshal(x.SinceTime)
+ } else if !yym25 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.SinceTime)
+ } else {
+ z.DecFallback(x.SinceTime, false)
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Timestamps = false
+ } else {
+ x.Timestamps = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TailLines != nil {
+ x.TailLines = nil
+ }
+ } else {
+ if x.TailLines == nil {
+ x.TailLines = new(int64)
+ }
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ *((*int64)(x.TailLines)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LimitBytes != nil {
+ x.LimitBytes = nil
+ }
+ } else {
+ if x.LimitBytes == nil {
+ x.LimitBytes = new(int64)
+ }
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj18-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Stdin != false
+ yyq2[1] = x.Stdout != false
+ yyq2[2] = x.Stderr != false
+ yyq2[3] = x.TTY != false
+ yyq2[4] = x.Container != ""
+ yyq2[5] = x.Kind != ""
+ yyq2[6] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdin"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stderr"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tty"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("container"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "stdin":
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ case "stdout":
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ case "stderr":
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ case "tty":
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ case "container":
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Stdin != false
+ yyq2[1] = x.Stdout != false
+ yyq2[2] = x.Stderr != false
+ yyq2[3] = x.TTY != false
+ yyq2[4] = x.Container != ""
+ yyq2[6] = x.Kind != ""
+ yyq2[7] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdin"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdin))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stdout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stdout))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stderr"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Stderr))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tty"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.TTY))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("container"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Container))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("command"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Command == nil {
+ r.EncodeNil()
+ } else {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Command, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "stdin":
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ case "stdout":
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ case "stderr":
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ case "tty":
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ case "container":
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ case "command":
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv9 := &x.Command
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv9, false, d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdin = false
+ } else {
+ x.Stdin = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stdout = false
+ } else {
+ x.Stdout = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Stderr = false
+ } else {
+ x.Stderr = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TTY = false
+ } else {
+ x.TTY = bool(r.DecodeBool())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Container = ""
+ } else {
+ x.Container = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Command = nil
+ } else {
+ yyv19 := &x.Command
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv19, false, d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[4] = x.Controller != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Controller == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.Controller
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeBool(bool(yy16))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("controller"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Controller == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.Controller
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(yy18))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
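+// CodecDecodeSelf decodes an OwnerReference, honoring registered codec
+// extensions first and otherwise dispatching to the map- or array-shaped
+// helpers below depending on the container type on the wire. (The yy*-prefixed
+// temporaries and the codecSelfer1234/codec1978 names suggest this file is
+// ugorji codecgen output.)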
+func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ case "controller":
+ if r.TryDecodeAsNil() {
+ if x.Controller != nil {
+ x.Controller = nil
+ }
+ } else {
+ if x.Controller == nil {
+ x.Controller = new(bool)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*bool)(x.Controller)) = r.DecodeBool()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Controller != nil {
+ x.Controller = nil
+ }
+ } else {
+ if x.Controller == nil {
+ x.Controller = new(bool)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*bool)(x.Controller)) = r.DecodeBool()
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
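+// CodecEncodeSelf encodes an ObjectReference either as an array (when
+// StructToArray is set on the handle) or as a map whose fields (kind,
+// namespace, name, uid, apiVersion, resourceVersion, fieldPath) are
+// omitted when empty.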
+func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Kind != ""
+ yyq2[1] = x.Namespace != ""
+ yyq2[2] = x.Name != ""
+ yyq2[3] = x.UID != ""
+ yyq2[4] = x.APIVersion != ""
+ yyq2[5] = x.ResourceVersion != ""
+ yyq2[6] = x.FieldPath != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.UID) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "resourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "fieldPath":
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = pkg1_types.UID(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldPath = ""
+ } else {
+ x.FieldPath = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
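+// CodecEncodeSelf encodes a LocalObjectReference; its single "name" field
+// is omitted from the map form when empty.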
+func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
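+// CodecEncodeSelf encodes a SerializedReference: the embedded reference is
+// always written, while kind and apiVersion are omitted from the map form
+// when empty.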
+func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = x.Kind != ""
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.Reference
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reference"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Reference
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "reference":
+ if r.TryDecodeAsNil() {
+ x.Reference = ObjectReference{}
+ } else {
+ yyv4 := &x.Reference
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reference = ObjectReference{}
+ } else {
+ yyv8 := &x.Reference
+ yyv8.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
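+// CodecEncodeSelf encodes an EventSource, omitting empty component and host
+// fields from the map form.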
+func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Component != ""
+ yyq2[1] = x.Host != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Component))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("component"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Component))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("host"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "component":
+ if r.TryDecodeAsNil() {
+ x.Component = ""
+ } else {
+ x.Component = string(r.DecodeString())
+ }
+ case "host":
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Component = ""
+ } else {
+ x.Component = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
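+// CodecEncodeSelf encodes an Event. metadata, involvedObject, source and the
+// two timestamps are always written; reason, message, count, type, kind and
+// apiVersion are omitted from the map form when they hold their zero values.
+// Application code normally reaches these Selfer methods through the ugorji
+// codec runtime (for example, codec.NewEncoder(w, handle).Encode(&event)).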
+func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [11]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Reason != ""
+ yyq2[3] = x.Message != ""
+ yyq2[4] = true
+ yyq2[5] = true
+ yyq2[6] = true
+ yyq2[7] = x.Count != 0
+ yyq2[8] = x.Type != ""
+ yyq2[9] = x.Kind != ""
+ yyq2[10] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(11)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.InvolvedObject
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("involvedObject"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.InvolvedObject
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yy20 := &x.Source
+ yy20.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("source"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy22 := &x.Source
+ yy22.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yy25 := &x.FirstTimestamp
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy25) {
+ } else if yym26 {
+ z.EncBinaryMarshal(yy25)
+ } else if !yym26 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy25)
+ } else {
+ z.EncFallback(yy25)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy27 := &x.FirstTimestamp
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy27) {
+ } else if yym28 {
+ z.EncBinaryMarshal(yy27)
+ } else if !yym28 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy27)
+ } else {
+ z.EncFallback(yy27)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yy30 := &x.LastTimestamp
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy30) {
+ } else if yym31 {
+ z.EncBinaryMarshal(yy30)
+ } else if !yym31 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy30)
+ } else {
+ z.EncFallback(yy30)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy32 := &x.LastTimestamp
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy32) {
+ } else if yym33 {
+ z.EncBinaryMarshal(yy32)
+ } else if !yym33 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy32)
+ } else {
+ z.EncFallback(yy32)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Count))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Count))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[9] {
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[9] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[10] {
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[10] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "involvedObject":
+ if r.TryDecodeAsNil() {
+ x.InvolvedObject = ObjectReference{}
+ } else {
+ yyv5 := &x.InvolvedObject
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "source":
+ if r.TryDecodeAsNil() {
+ x.Source = EventSource{}
+ } else {
+ yyv8 := &x.Source
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "firstTimestamp":
+ if r.TryDecodeAsNil() {
+ x.FirstTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv9 := &x.FirstTimestamp
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if yym10 {
+ z.DecBinaryUnmarshal(yyv9)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ case "lastTimestamp":
+ if r.TryDecodeAsNil() {
+ x.LastTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv11 := &x.LastTimestamp
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(yyv11)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ case "count":
+ if r.TryDecodeAsNil() {
+ x.Count = 0
+ } else {
+ x.Count = int32(r.DecodeInt(32))
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj17 int
+ var yyb17 bool
+ var yyhl17 bool = l >= 0
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv18 := &x.ObjectMeta
+ yyv18.CodecDecodeSelf(d)
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.InvolvedObject = ObjectReference{}
+ } else {
+ yyv19 := &x.InvolvedObject
+ yyv19.CodecDecodeSelf(d)
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Source = EventSource{}
+ } else {
+ yyv22 := &x.Source
+ yyv22.CodecDecodeSelf(d)
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FirstTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv23 := &x.FirstTimestamp
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv23) {
+ } else if yym24 {
+ z.DecBinaryUnmarshal(yyv23)
+ } else if !yym24 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv23)
+ } else {
+ z.DecFallback(yyv23, false)
+ }
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTimestamp = pkg2_unversioned.Time{}
+ } else {
+ yyv25 := &x.LastTimestamp
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv25) {
+ } else if yym26 {
+ z.DecBinaryUnmarshal(yyv25)
+ } else if !yym26 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv25)
+ } else {
+ z.DecFallback(yyv25, false)
+ }
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Count = 0
+ } else {
+ x.Count = int32(r.DecodeInt(32))
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj17++
+ if yyhl17 {
+ yyb17 = yyj17 > l
+ } else {
+ yyb17 = r.CheckBreak()
+ }
+ if yyb17 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj17-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
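+// CodecEncodeSelf encodes an EventList: metadata and items are always
+// written (items via the generated encSliceEvent helper), while kind and
+// apiVersion are omitted from the map form when empty.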
+func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceEvent(([]Event)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceEvent(([]Event)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceEvent((*[]Event)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceEvent((*[]Event)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
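+// CodecEncodeSelf encodes a generic List, serializing items as raw
+// runtime.RawExtension values via the generated encSliceruntime_RawExtension
+// helper; kind and apiVersion are omitted from the map form when empty.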
+func (x *List) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *List) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Type != ""
+ yyq2[1] = len(x.Max) != 0
+ yyq2[2] = len(x.Min) != 0
+ yyq2[3] = len(x.Default) != 0
+ yyq2[4] = len(x.DefaultRequest) != 0
+ yyq2[5] = len(x.MaxLimitRequestRatio) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Max == nil {
+ r.EncodeNil()
+ } else {
+ x.Max.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("max"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Max == nil {
+ r.EncodeNil()
+ } else {
+ x.Max.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Min == nil {
+ r.EncodeNil()
+ } else {
+ x.Min.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("min"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Min == nil {
+ r.EncodeNil()
+ } else {
+ x.Min.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Default == nil {
+ r.EncodeNil()
+ } else {
+ x.Default.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("default"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Default == nil {
+ r.EncodeNil()
+ } else {
+ x.Default.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.DefaultRequest == nil {
+ r.EncodeNil()
+ } else {
+ x.DefaultRequest.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("defaultRequest"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DefaultRequest == nil {
+ r.EncodeNil()
+ } else {
+ x.DefaultRequest.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.MaxLimitRequestRatio == nil {
+ r.EncodeNil()
+ } else {
+ x.MaxLimitRequestRatio.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MaxLimitRequestRatio == nil {
+ r.EncodeNil()
+ } else {
+ x.MaxLimitRequestRatio.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = LimitType(r.DecodeString())
+ }
+ case "max":
+ if r.TryDecodeAsNil() {
+ x.Max = nil
+ } else {
+ yyv5 := &x.Max
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "min":
+ if r.TryDecodeAsNil() {
+ x.Min = nil
+ } else {
+ yyv6 := &x.Min
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "default":
+ if r.TryDecodeAsNil() {
+ x.Default = nil
+ } else {
+ yyv7 := &x.Default
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "defaultRequest":
+ if r.TryDecodeAsNil() {
+ x.DefaultRequest = nil
+ } else {
+ yyv8 := &x.DefaultRequest
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "maxLimitRequestRatio":
+ if r.TryDecodeAsNil() {
+ x.MaxLimitRequestRatio = nil
+ } else {
+ yyv9 := &x.MaxLimitRequestRatio
+ yyv9.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = LimitType(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Max = nil
+ } else {
+ yyv12 := &x.Max
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Min = nil
+ } else {
+ yyv13 := &x.Min
+ yyv13.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Default = nil
+ } else {
+ yyv14 := &x.Default
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DefaultRequest = nil
+ } else {
+ yyv15 := &x.DefaultRequest
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxLimitRequestRatio = nil
+ } else {
+ yyv16 := &x.MaxLimitRequestRatio
+ yyv16.CodecDecodeSelf(d)
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("limits"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Limits == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "limits":
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv4 := &x.Limits
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Limits = nil
+ } else {
+ yyv7 := &x.Limits
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = LimitRangeSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = LimitRangeSpec{}
+ } else {
+ yyv10 := &x.Spec
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceLimitRange(([]LimitRange)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceLimitRange(([]LimitRange)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceLimitRange((*[]LimitRange)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceLimitRange((*[]LimitRange)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Hard) != 0
+ yyq2[1] = len(x.Scopes) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hard"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Scopes == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scopes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Scopes == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hard":
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv4 := &x.Hard
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "scopes":
+ if r.TryDecodeAsNil() {
+ x.Scopes = nil
+ } else {
+ yyv5 := &x.Scopes
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv8 := &x.Hard
+ yyv8.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Scopes = nil
+ } else {
+ yyv9 := &x.Scopes
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Hard) != 0
+ yyq2[1] = len(x.Used) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hard"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Hard == nil {
+ r.EncodeNil()
+ } else {
+ x.Hard.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Used == nil {
+ r.EncodeNil()
+ } else {
+ x.Used.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("used"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Used == nil {
+ r.EncodeNil()
+ } else {
+ x.Used.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hard":
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv4 := &x.Hard
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "used":
+ if r.TryDecodeAsNil() {
+ x.Used = nil
+ } else {
+ yyv5 := &x.Used
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hard = nil
+ } else {
+ yyv7 := &x.Hard
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Used = nil
+ } else {
+ yyv8 := &x.Used
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ResourceQuotaSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ResourceQuotaStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ResourceQuotaSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ResourceQuotaStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceResourceQuota((*[]ResourceQuota)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceResourceQuota((*[]ResourceQuota)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
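+// CodecEncodeSelf encodes a Secret as a map (or as an array when StructToArray is set);
+// in map form the optional data, stringData, type, kind and apiVersion fields are
+// written only when non-empty.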
+func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Data) != 0
+ yyq2[2] = len(x.StringData) != 0
+ yyq2[3] = x.Type != ""
+ yyq2[4] = x.Kind != ""
+ yyq2[5] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.StringData == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.StringData, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("stringData"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StringData == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.StringData, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
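+// CodecDecodeSelf decodes a Secret from either a map or an array container.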
+func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv5 := &x.Data
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decMapstringSliceuint8((*map[string][]uint8)(yyv5), d)
+ }
+ }
+ case "stringData":
+ if r.TryDecodeAsNil() {
+ x.StringData = nil
+ } else {
+ yyv7 := &x.StringData
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv7, false, d)
+ }
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = SecretType(r.DecodeString())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv13 := &x.ObjectMeta
+ yyv13.CodecDecodeSelf(d)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv14 := &x.Data
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decMapstringSliceuint8((*map[string][]uint8)(yyv14), d)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StringData = nil
+ } else {
+ yyv16 := &x.StringData
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv16, false, d)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = SecretType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
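+// CodecEncodeSelf writes a SecretType value as a plain UTF-8 string unless a codec
+// extension is registered for it.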
+func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
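+// CodecEncodeSelf encodes a SecretList; metadata and items are always written, while
+// kind and apiVersion are included only when non-empty (map encoding).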
+func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceSecret(([]Secret)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceSecret(([]Secret)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
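+// CodecDecodeSelf decodes a SecretList, tolerating both map and array containers.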
+func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceSecret((*[]Secret)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceSecret((*[]Secret)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
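+// CodecEncodeSelf encodes a ConfigMap; the data map uses the generic map[string]string
+// fast-path helper and is omitted when empty (map encoding).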
+func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Data) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Data, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Data, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
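+// CodecDecodeSelf decodes a ConfigMap from either a map or an array container.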
+func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv5 := &x.Data
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv5, false, d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv11 := &x.Data
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv11, false, d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
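+// CodecEncodeSelf encodes a ConfigMapList with the same layout as the other list
+// types: metadata, items, then optional kind and apiVersion.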
+func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceConfigMap(([]ConfigMap)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceConfigMap(([]ConfigMap)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceConfigMap((*[]ConfigMap)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceConfigMap((*[]ConfigMap)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
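+// CodecEncodeSelf writes a ComponentConditionType as a plain UTF-8 string.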
+func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
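+// CodecEncodeSelf encodes a ComponentCondition; type and status are always written,
+// message and error only when non-empty (map encoding).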
+func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Message != ""
+ yyq2[3] = x.Error != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Status.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Status.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Error))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("error"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Error))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ComponentConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "error":
+ if r.TryDecodeAsNil() {
+ x.Error = ""
+ } else {
+ x.Error = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = ComponentConditionType(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = ConditionStatus(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Error = ""
+ } else {
+ x.Error = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
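+// CodecEncodeSelf encodes a ComponentStatus, always writing metadata and emitting
+// conditions, kind and apiVersion only when set (map encoding).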
+func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Conditions) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv5 := &x.Conditions
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceComponentCondition((*[]ComponentCondition)(yyv5), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv11 := &x.Conditions
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceComponentCondition((*[]ComponentCondition)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
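+// CodecEncodeSelf encodes a ComponentStatusList using the standard list layout of
+// metadata, items, kind and apiVersion.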
+func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceComponentStatus((*[]ComponentStatus)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceComponentStatus((*[]ComponentStatus)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Items) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv7 := &x.Items
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FieldRef != nil
+ yyq2[2] = x.ResourceFieldRef != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.FieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.FieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceFieldRef == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceFieldRef.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "fieldRef":
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ case "resourceFieldRef":
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.FieldRef != nil {
+ x.FieldRef = nil
+ }
+ } else {
+ if x.FieldRef == nil {
+ x.FieldRef = new(ObjectFieldSelector)
+ }
+ x.FieldRef.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceFieldRef != nil {
+ x.ResourceFieldRef = nil
+ }
+ } else {
+ if x.ResourceFieldRef == nil {
+ x.ResourceFieldRef = new(ResourceFieldSelector)
+ }
+ x.ResourceFieldRef.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Capabilities != nil
+ yyq2[1] = x.Privileged != nil
+ yyq2[2] = x.SELinuxOptions != nil
+ yyq2[3] = x.RunAsUser != nil
+ yyq2[4] = x.RunAsNonRoot != nil
+ yyq2[5] = x.ReadOnlyRootFilesystem != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Capabilities == nil {
+ r.EncodeNil()
+ } else {
+ x.Capabilities.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("capabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Capabilities == nil {
+ r.EncodeNil()
+ } else {
+ x.Capabilities.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Privileged == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.Privileged
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(yy7))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("privileged"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Privileged == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Privileged
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(yy9))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.RunAsUser
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(yy15))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsUser"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsUser == nil {
+ r.EncodeNil()
+ } else {
+ yy17 := *x.RunAsUser
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeInt(int64(yy17))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.RunAsNonRoot
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeBool(bool(yy20))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RunAsNonRoot == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.RunAsNonRoot
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(yy22))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.ReadOnlyRootFilesystem == nil {
+ r.EncodeNil()
+ } else {
+ yy25 := *x.ReadOnlyRootFilesystem
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeBool(bool(yy25))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ReadOnlyRootFilesystem == nil {
+ r.EncodeNil()
+ } else {
+ yy27 := *x.ReadOnlyRootFilesystem
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeBool(bool(yy27))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "capabilities":
+ if r.TryDecodeAsNil() {
+ if x.Capabilities != nil {
+ x.Capabilities = nil
+ }
+ } else {
+ if x.Capabilities == nil {
+ x.Capabilities = new(Capabilities)
+ }
+ x.Capabilities.CodecDecodeSelf(d)
+ }
+ case "privileged":
+ if r.TryDecodeAsNil() {
+ if x.Privileged != nil {
+ x.Privileged = nil
+ }
+ } else {
+ if x.Privileged == nil {
+ x.Privileged = new(bool)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*bool)(x.Privileged)) = r.DecodeBool()
+ }
+ }
+ case "seLinuxOptions":
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ case "runAsUser":
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "runAsNonRoot":
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ case "readOnlyRootFilesystem":
+ if r.TryDecodeAsNil() {
+ if x.ReadOnlyRootFilesystem != nil {
+ x.ReadOnlyRootFilesystem = nil
+ }
+ } else {
+ if x.ReadOnlyRootFilesystem == nil {
+ x.ReadOnlyRootFilesystem = new(bool)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Capabilities != nil {
+ x.Capabilities = nil
+ }
+ } else {
+ if x.Capabilities == nil {
+ x.Capabilities = new(Capabilities)
+ }
+ x.Capabilities.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Privileged != nil {
+ x.Privileged = nil
+ }
+ } else {
+ if x.Privileged == nil {
+ x.Privileged = new(bool)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*bool)(x.Privileged)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsUser != nil {
+ x.RunAsUser = nil
+ }
+ } else {
+ if x.RunAsUser == nil {
+ x.RunAsUser = new(int64)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RunAsNonRoot != nil {
+ x.RunAsNonRoot = nil
+ }
+ } else {
+ if x.RunAsNonRoot == nil {
+ x.RunAsNonRoot = new(bool)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*bool)(x.RunAsNonRoot)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ReadOnlyRootFilesystem != nil {
+ x.ReadOnlyRootFilesystem = nil
+ }
+ } else {
+ if x.ReadOnlyRootFilesystem == nil {
+ x.ReadOnlyRootFilesystem = new(bool)
+ }
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool()
+ }
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.User != ""
+ yyq2[1] = x.Role != ""
+ yyq2[2] = x.Type != ""
+ yyq2[3] = x.Level != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Role))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("role"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Role))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Type))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Level))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("level"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Level))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ case "role":
+ if r.TryDecodeAsNil() {
+ x.Role = ""
+ } else {
+ x.Role = string(r.DecodeString())
+ }
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ case "level":
+ if r.TryDecodeAsNil() {
+ x.Level = ""
+ } else {
+ x.Level = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Role = ""
+ } else {
+ x.Role = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Level = ""
+ } else {
+ x.Level = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Range))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("range"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Range))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "range":
+ if r.TryDecodeAsNil() {
+ x.Range = ""
+ } else {
+ x.Range = string(r.DecodeString())
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv6 := &x.Data
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Range = ""
+ } else {
+ x.Range = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv13 := &x.Data
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *yyv13 = r.DecodeBytes(*(*[]byte)(yyv13), false, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []OwnerReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]OwnerReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]OwnerReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = OwnerReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, OwnerReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = OwnerReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, OwnerReference{}) // var yyz1 OwnerReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = OwnerReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []OwnerReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PersistentVolumeAccessMode{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PersistentVolumeAccessMode, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PersistentVolumeAccessMode, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PersistentVolumeAccessMode{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PersistentVolume{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 456)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PersistentVolume, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PersistentVolume, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolume{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PersistentVolume{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolume{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolume{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PersistentVolume{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PersistentVolumeClaim{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PersistentVolumeClaim, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PersistentVolumeClaim, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolumeClaim{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PersistentVolumeClaim{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolumeClaim{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PersistentVolumeClaim{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PersistentVolumeClaim{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []KeyToPath{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]KeyToPath, yyrl1)
+ }
+ } else {
+ yyv1 = make([]KeyToPath, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = KeyToPath{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, KeyToPath{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = KeyToPath{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = KeyToPath{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []KeyToPath{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HTTPHeader{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HTTPHeader, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HTTPHeader, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPHeader{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HTTPHeader{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPHeader{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPHeader{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HTTPHeader{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Capability{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Capability, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Capability, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = Capability(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = Capability(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 Capability
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = Capability(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Capability{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ContainerPort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ContainerPort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ContainerPort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerPort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ContainerPort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerPort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerPort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ContainerPort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EnvVar{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EnvVar, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EnvVar, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EnvVar{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EnvVar{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EnvVar{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EnvVar{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EnvVar{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []VolumeMount{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]VolumeMount, yyrl1)
+ }
+ } else {
+ yyv1 = make([]VolumeMount, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = VolumeMount{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, VolumeMount{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = VolumeMount{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = VolumeMount{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []VolumeMount{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeSelectorTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeSelectorTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeSelectorTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeSelectorTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeSelectorTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeSelectorRequirement{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeSelectorRequirement, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeSelectorRequirement, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorRequirement{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeSelectorRequirement{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorRequirement{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeSelectorRequirement{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeSelectorRequirement{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodAffinityTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodAffinityTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodAffinityTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodAffinityTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodAffinityTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodAffinityTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodAffinityTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodAffinityTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []WeightedPodAffinityTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]WeightedPodAffinityTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]WeightedPodAffinityTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = WeightedPodAffinityTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, WeightedPodAffinityTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = WeightedPodAffinityTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = WeightedPodAffinityTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []WeightedPodAffinityTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PreferredSchedulingTerm{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PreferredSchedulingTerm, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PreferredSchedulingTerm, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PreferredSchedulingTerm{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PreferredSchedulingTerm{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PreferredSchedulingTerm{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PreferredSchedulingTerm{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PreferredSchedulingTerm{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Volume{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 176)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Volume, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Volume, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Volume{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Volume{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Volume{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Volume{}) // var yyz1 Volume
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Volume{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Volume{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Container{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 256)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Container, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Container, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Container{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Container{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Container{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Container{}) // var yyz1 Container
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Container{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Container{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LocalObjectReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LocalObjectReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LocalObjectReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LocalObjectReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LocalObjectReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LocalObjectReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LocalObjectReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LocalObjectReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ContainerStatus{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ContainerStatus, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ContainerStatus, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerStatus{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ContainerStatus{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerStatus{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerStatus{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ContainerStatus{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Pod{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Pod, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Pod, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Pod{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Pod{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Pod{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Pod{}) // var yyz1 Pod
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Pod{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Pod{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodTemplate{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodTemplate, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodTemplate, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodTemplate{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodTemplate{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodTemplate{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodTemplate{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodTemplate{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ReplicationController{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ReplicationController, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ReplicationController, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicationController{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ReplicationController{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicationController{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicationController{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ReplicationController{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LoadBalancerIngress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LoadBalancerIngress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LoadBalancerIngress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LoadBalancerIngress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LoadBalancerIngress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LoadBalancerIngress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LoadBalancerIngress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LoadBalancerIngress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ServicePort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ServicePort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ServicePort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServicePort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ServicePort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServicePort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServicePort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ServicePort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Service{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 432)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Service, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Service, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Service{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Service{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Service{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Service{}) // var yyz1 Service
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Service{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Service{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ObjectReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ObjectReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ObjectReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ObjectReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ObjectReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ObjectReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ObjectReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ObjectReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ServiceAccount{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ServiceAccount, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ServiceAccount, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServiceAccount{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ServiceAccount{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServiceAccount{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ServiceAccount{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ServiceAccount{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EndpointSubset{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EndpointSubset, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EndpointSubset, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointSubset{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EndpointSubset{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointSubset{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointSubset{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EndpointSubset{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EndpointAddress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EndpointAddress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EndpointAddress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointAddress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EndpointAddress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointAddress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointAddress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EndpointAddress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []EndpointPort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]EndpointPort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]EndpointPort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointPort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, EndpointPort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointPort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = EndpointPort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []EndpointPort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Endpoints{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Endpoints, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Endpoints, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Endpoints{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Endpoints{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Endpoints{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Endpoints{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Endpoints{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NodeAddress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NodeAddress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NodeAddress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeAddress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NodeAddress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeAddress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NodeAddress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NodeAddress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ContainerImage{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ContainerImage, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ContainerImage, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerImage{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ContainerImage{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerImage{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ContainerImage{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ContainerImage{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []UniqueVolumeName{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]UniqueVolumeName, yyrl1)
+ }
+ } else {
+ yyv1 = make([]UniqueVolumeName, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []UniqueVolumeName{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []AttachedVolume{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]AttachedVolume, yyrl1)
+ }
+ } else {
+ yyv1 = make([]AttachedVolume, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = AttachedVolume{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, AttachedVolume{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = AttachedVolume{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = AttachedVolume{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []AttachedVolume{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yyk1.CodecEncodeSelf(e)
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy3 := &yyv1
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy3) {
+ } else if !yym4 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy3)
+ } else {
+ z.EncFallback(yy3)
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72)
+ yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 ResourceName
+ var yymv1 pkg3_resource.Quantity
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = ResourceName(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = pkg3_resource.Quantity{}
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = pkg3_resource.Quantity{}
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv3) {
+ } else if !yym4 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv3)
+ } else {
+ z.DecFallback(yyv3, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = ResourceName(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = pkg3_resource.Quantity{}
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = pkg3_resource.Quantity{}
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Node{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 616)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Node, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Node, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Node{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Node{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Node{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Node{}) // var yyz1 Node
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Node{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Node{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []FinalizerName{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]FinalizerName, yyrl1)
+ }
+ } else {
+ yyv1 = make([]FinalizerName, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FinalizerName(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FinalizerName(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 FinalizerName
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FinalizerName(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []FinalizerName{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Namespace{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Namespace, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Namespace, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Namespace{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Namespace{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Namespace{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Namespace{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Namespace{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Event{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Event, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Event, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Event{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Event{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Event{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Event{}) // var yyz1 Event
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Event{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Event{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy2) {
+ } else if !yym3 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy2)
+ } else {
+ z.EncFallback(yy2)
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg5_runtime.RawExtension{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg5_runtime.RawExtension, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg5_runtime.RawExtension, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg5_runtime.RawExtension{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yym3 := z.DecBinary()
+ _ = yym3
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv2) {
+ } else if !yym3 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv2)
+ } else {
+ z.DecFallback(yyv2, false)
+ }
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, pkg5_runtime.RawExtension{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg5_runtime.RawExtension{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg5_runtime.RawExtension{}
+ } else {
+ yyv6 := &yyv1[yyj1]
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg5_runtime.RawExtension{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LimitRangeItem{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LimitRangeItem, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LimitRangeItem, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRangeItem{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LimitRangeItem{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRangeItem{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRangeItem{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LimitRangeItem{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LimitRange{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LimitRange, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LimitRange, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRange{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LimitRange{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRange{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LimitRange{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LimitRange{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ResourceQuotaScope{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ResourceQuotaScope, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ResourceQuotaScope, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = ResourceQuotaScope(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ResourceQuotaScope{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ResourceQuota{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ResourceQuota, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ResourceQuota, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ResourceQuota{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ResourceQuota{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ResourceQuota{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ResourceQuota{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ResourceQuota{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyk1))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1))
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40)
+ yyv1 = make(map[string][]uint8, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 string
+ var yymv1 []uint8
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else {
+ *yyv3 = r.DecodeBytes(*(*[]byte)(yyv3), false, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeUint(uint64(yyv1))
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []uint8{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]uint8, yyrl1)
+ }
+ } else {
+ yyv1 = make([]uint8, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = 0
+ } else {
+ yyv1[yyj1] = uint8(r.DecodeUint(8))
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, 0)
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = 0
+ } else {
+ yyv1[yyj1] = uint8(r.DecodeUint(8))
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, 0) // var yyz1 uint8
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = 0
+ } else {
+ yyv1[yyj1] = uint8(r.DecodeUint(8))
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []uint8{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Secret{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 272)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Secret, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Secret, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Secret{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Secret{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Secret{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Secret{}) // var yyz1 Secret
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Secret{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Secret{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ConfigMap{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 248)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ConfigMap, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ConfigMap, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ConfigMap{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ConfigMap{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ConfigMap{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ConfigMap{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ConfigMap{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ComponentCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ComponentCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ComponentCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ComponentCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ComponentCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ComponentStatus{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ComponentStatus, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ComponentStatus, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentStatus{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ComponentStatus{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentStatus{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ComponentStatus{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ComponentStatus{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []DownwardAPIVolumeFile{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]DownwardAPIVolumeFile, yyrl1)
+ }
+ } else {
+ yyv1 = make([]DownwardAPIVolumeFile, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DownwardAPIVolumeFile{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, DownwardAPIVolumeFile{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DownwardAPIVolumeFile{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DownwardAPIVolumeFile{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []DownwardAPIVolumeFile{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.go
new file mode 100644
index 0000000..42a0b98
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types.go
@@ -0,0 +1,3329 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+// The comments for the structs and fields can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored and not exported to the SwaggerAPI.
+//
+// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh
+
+// Common string formats
+// ---------------------
+// Many fields in this API have formatting requirements. The commonly used
+// formats are defined here.
+//
+// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
+// in the C language. This is captured by the following regex:
+// [A-Za-z_][A-Za-z0-9_]*
+// This defines the format, but not the length restriction, which should be
+// specified at the definition of any field of this type.
+//
+// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
+// to the definition of a "label" in RFCs 1035 and 1123. This is captured
+// by the following regex:
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?
+//
+// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
+// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
+// by the following regex:
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+// or more simply:
+// DNS_LABEL(\.DNS_LABEL)*
+//
+// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
+// conforms to the definition of IANA service name in RFC 6335.
+// It must contain at least one letter [a-z] and it must contain only [a-z0-9-].
+// Hyphens ('-') cannot be the leading or trailing character of the string
+// and cannot be adjacent to other hyphens.
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta struct {
+ // Name must be unique within a namespace. It is required when creating resources, although
+ // some resources may allow a client to request the generation of an appropriate name
+ // automatically. Name is primarily intended for creation idempotence and configuration
+ // definition.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // GenerateName is an optional prefix, used by the server, to generate a unique
+ // name ONLY IF the Name field has not been provided.
+ // If this field is used, the name returned to the client will be different
+ // than the name passed. This value will also be combined with a unique suffix.
+ // The provided value has the same validation rules as the Name field,
+ // and may be truncated by the length of the suffix required to make the value
+ // unique on the server.
+ //
+ // If this field is specified and the generated name exists, the server will
+ // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+ // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+ // should retry (optionally after the time indicated in the Retry-After header).
+ //
+ // Applied only if Name is not specified.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency
+ GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
+
+ // Namespace defines the space within which each name must be unique. An empty namespace is
+ // equivalent to the "default" namespace, but "default" is the canonical representation.
+ // Not all objects are required to be scoped to a namespace - the value of this field for
+ // those objects will be empty.
+ //
+ // Must be a DNS_LABEL.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+
+ // SelfLink is a URL representing this object.
+ // Populated by the system.
+ // Read-only.
+ SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
+
+ // UID is the unique in time and space value for this object. It is typically generated by
+ // the server on successful creation of a resource and is not allowed to change on PUT
+ // operations.
+ //
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+
+ // An opaque value that represents the internal version of this object that can
+ // be used by clients to determine when objects have changed. May be used for optimistic
+ // concurrency, change detection, and the watch operation on a resource or set of resources.
+ // Clients must treat these values as opaque and pass them unmodified back to the server.
+ // They may only be valid for a particular resource or set of resources.
+ //
+ // Populated by the system.
+ // Read-only.
+ // Value must be treated as opaque by clients.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+ ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+ // A sequence number representing a specific generation of the desired state.
+ // Populated by the system. Read-only.
+ Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
+
+ // CreationTimestamp is a timestamp representing the server time when this object was
+ // created. It is not guaranteed to be set in happens-before order across separate operations.
+ // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ //
+ // Populated by the system.
+ // Read-only.
+ // Null for lists.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
+
+ // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+ // field is set by the server when a graceful deletion is requested by the user, and is not
+ // directly settable by a client. The resource will be deleted (no longer visible from
+ // resource lists, and not reachable by name) after the time in this field. Once set, this
+ // value may not be unset or be set further into the future, although it may be shortened
+ // or the resource may be deleted prior to this time. For example, a user may request that
+ // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
+ // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet
+ // will send a hard termination signal to the container.
+ // If not set, graceful deletion of the object has not been requested.
+ //
+ // Populated by the system when a graceful deletion is requested.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
+
+ // Number of seconds allowed for this object to gracefully terminate before
+ // it will be removed from the system. Only set when deletionTimestamp is also set.
+ // May only be shortened.
+ // Read-only.
+ DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
+
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects. May match selectors of replication controllers
+ // and services.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md
+ // TODO: replace map[string]string with labels.LabelSet type
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
+
+ // List of objects that this object depends on. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
+
+ // Must be empty before the object is deleted from the registry. Each entry
+ // is an identifier for the responsible component that will remove the entry
+ // from the list. If the deletionTimestamp of the object is non-nil, entries
+ // in this list can only be removed.
+ Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
+}
+
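+// Illustrative sketch (not part of the upstream API): a minimal ObjectMeta as
+// a client might populate it before creation. Read-only, system-populated
+// fields (UID, ResourceVersion, CreationTimestamp, ...) are left unset on
+// purpose. The function name and values are hypothetical.
+func exampleObjectMeta() ObjectMeta {
+	return ObjectMeta{
+		Name:      "example",
+		Namespace: NamespaceDefault,
+		Labels:    map[string]string{"app": "example"},
+		Annotations: map[string]string{
+			"example.org/note": "arbitrary, non-queryable metadata",
+		},
+	}
+}
+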
+const (
+ // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+ NamespaceDefault string = "default"
+ // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+ NamespaceAll string = ""
+)
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+type Volume struct {
+ // Volume's name.
+ // Must be a DNS_LABEL and unique within the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // VolumeSource represents the location and type of the mounted volume.
+ // If not specified, the Volume is implied to be an EmptyDir.
+ // This implied behavior is deprecated and will be removed in a future version.
+ VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
+}
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+type VolumeSource struct {
+ // HostPath represents a pre-existing file or directory on the host
+ // machine that is directly exposed to the container. This is generally
+ // used for system agents or other privileged things that are allowed
+ // to see the host machine. Most containers will NOT need this.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+ // ---
+ // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ // mount host directories as read/write.
+ HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
+ // EmptyDir represents a temporary directory that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir
+ EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
+ // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
+ // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
+ // GitRepo represents a git repository at a particular revision.
+ GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
+ // Secret represents a secret that should populate this volume.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
+ Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
+ // NFS represents an NFS mount on the host that shares a pod's lifetime
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
+ // ISCSI represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
+ ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
+ // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+ Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
+ // PersistentVolumeClaimVolumeSource represents a reference to a
+ // PersistentVolumeClaim in the same namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+ RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
+ // FlexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec based plugin. This is an
+ // alpha feature and may change in the future.
+ FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
+
+ // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
+
+ // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running.
+ Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
+
+ // DownwardAPI represents downward API about the pod that should populate this volume
+ DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
+ // ConfigMap represents a configMap that should populate this volume
+ ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
+ // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+ VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
+}
+
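+// Illustrative sketch (not part of the upstream API): a Volume whose inline
+// VolumeSource sets exactly one member, as required by the comment above.
+// Here it is an EmptyDir on the node's default storage medium; the volume
+// name is hypothetical.
+func exampleScratchVolume() Volume {
+	return Volume{
+		Name: "scratch",
+		VolumeSource: VolumeSource{
+			EmptyDir: &EmptyDirVolumeSource{Medium: StorageMediumDefault},
+		},
+	}
+}
+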
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+type PersistentVolumeClaimVolumeSource struct {
+ // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
+ // Will force the ReadOnly setting in VolumeMounts.
+ // Default false.
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+}
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+type PersistentVolumeSource struct {
+ // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
+ // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
+ // HostPath represents a directory on the host.
+ // Provisioned by a developer or tester.
+ // This is useful for single-node development and testing only!
+ // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+ HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
+ // Glusterfs represents a Glusterfs volume that is attached to a host and
+ // exposed to the pod. Provisioned by an admin.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+ Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
+ // NFS represents an NFS mount on the host. Provisioned by an admin.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+ RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
+ // ISCSI represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
+ // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
+ // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service running.
+ Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
+ // FlexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec based plugin. This is an
+ // alpha feature and may change in the future.
+ FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
+ // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+ VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md
+type PersistentVolume struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines a specification of a persistent volume owned by the cluster.
+ // Provisioned by an administrator.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes
+ Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status represents the current information/status for the persistent volume.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes
+ Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+type PersistentVolumeSpec struct {
+ // A description of the persistent volume's resources and capacity.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity
+ Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+ // The actual volume backing the persistent volume.
+ PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
+ // AccessModes contains all ways the volume can be mounted.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+ // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+ // Expected to be non-nil when bound.
+ // claim.VolumeName is the authoritative bind between PV and PVC.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding
+ ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
+ // What happens to a persistent volume when released from its claim.
+ // Valid options are Retain (default) and Recycle.
+ // Recycling must be supported by the volume plugin underlying this persistent volume.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy
+ PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
+}
+
+// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
+type PersistentVolumeReclaimPolicy string
+
+const (
+ // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
+ // The volume plugin must support Recycling.
+ PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
+ // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
+ // The volume plugin must support Deletion.
+ PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
+ // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
+ // The default policy is Retain.
+ PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
+)
+
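+// Illustrative sketch (not part of the upstream API): an admin-provisioned
+// PersistentVolume combining metadata, capacity, an inline
+// PersistentVolumeSource (HostPath here, so single-node use only), access
+// modes, and a reclaim policy. ResourceList and ResourceStorage are defined
+// later in this file; names and sizes are hypothetical.
+func examplePersistentVolume() PersistentVolume {
+	return PersistentVolume{
+		ObjectMeta: ObjectMeta{Name: "example-pv"},
+		Spec: PersistentVolumeSpec{
+			Capacity: ResourceList{
+				ResourceStorage: resource.MustParse("10Gi"),
+			},
+			PersistentVolumeSource: PersistentVolumeSource{
+				HostPath: &HostPathVolumeSource{Path: "/tmp/example-pv"},
+			},
+			AccessModes:                   []PersistentVolumeAccessMode{ReadWriteOnce},
+			PersistentVolumeReclaimPolicy: PersistentVolumeReclaimRetain,
+		},
+	}
+}
+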
+// PersistentVolumeStatus is the current status of a persistent volume.
+type PersistentVolumeStatus struct {
+ // Phase indicates if a volume is available, bound to a claim, or released by a claim.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase
+ Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
+ // A human-readable message indicating details about why the volume is in this state.
+ Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+ // Reason is a brief CamelCase string that describes any failure and is meant
+ // for machine parsing and tidy display in the CLI.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+}
+
+// PersistentVolumeList is a list of PersistentVolume items.
+type PersistentVolumeList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of persistent volumes.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md
+ Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+type PersistentVolumeClaim struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired characteristics of a volume requested by a pod author.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status represents the current information/status of a persistent volume claim.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+type PersistentVolumeClaimList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // A list of persistent volume claims.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+ Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+type PersistentVolumeClaimSpec struct {
+ // AccessModes contains the desired access modes the volume should have.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+ // A label query over volumes to consider for binding.
+ Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+ // Resources represents the minimum resources the volume should have.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
+ Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
+ // VolumeName is the binding reference to the PersistentVolume backing this claim.
+ VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
+}
+
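+// Illustrative sketch (not part of the upstream API): a claim asking for 1Gi
+// of ReadWriteOnce storage. Resources.Requests uses the ResourceStorage key
+// defined later in this file; names and sizes are hypothetical.
+func examplePersistentVolumeClaim() PersistentVolumeClaim {
+	return PersistentVolumeClaim{
+		ObjectMeta: ObjectMeta{Name: "example-claim", Namespace: NamespaceDefault},
+		Spec: PersistentVolumeClaimSpec{
+			AccessModes: []PersistentVolumeAccessMode{ReadWriteOnce},
+			Resources: ResourceRequirements{
+				Requests: ResourceList{
+					ResourceStorage: resource.MustParse("1Gi"),
+				},
+			},
+		},
+	}
+}
+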
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+type PersistentVolumeClaimStatus struct {
+ // Phase represents the current phase of PersistentVolumeClaim.
+ Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
+ // AccessModes contains the actual access modes the volume backing the PVC has.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+ // Represents the actual resources of the underlying volume.
+ Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+ // can be mounted read/write mode to exactly 1 host
+ ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+ // can be mounted in read-only mode to many hosts
+ ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+ // can be mounted in read/write mode to many hosts
+ ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+ // used for PersistentVolumes that are not available
+ VolumePending PersistentVolumePhase = "Pending"
+ // used for PersistentVolumes that are not yet bound
+ // Available volumes are held by the binder and matched to PersistentVolumeClaims
+ VolumeAvailable PersistentVolumePhase = "Available"
+ // used for PersistentVolumes that are bound
+ VolumeBound PersistentVolumePhase = "Bound"
+ // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+ // released volumes must be recycled before becoming available again
+ // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+ VolumeReleased PersistentVolumePhase = "Released"
+ // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+ VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+ // used for PersistentVolumeClaims that are not yet bound
+ ClaimPending PersistentVolumeClaimPhase = "Pending"
+ // used for PersistentVolumeClaims that are bound
+ ClaimBound PersistentVolumeClaimPhase = "Bound"
+ // used for PersistentVolumeClaims that lost their underlying
+ // PersistentVolume. The claim was bound to a PersistentVolume and this
+ // volume does not exist any longer and all data on it was lost.
+ ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+ // Path of the directory on the host.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+ Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+type EmptyDirVolumeSource struct {
+ // What type of storage medium should back this directory.
+ // The default is "" which means to use the node's default medium.
+ // Must be an empty string (default) or Memory.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir
+ Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsVolumeSource struct {
+ // EndpointsName is the endpoint name that details Glusterfs topology.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
+
+ // Path is the Glusterfs volume path.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+ // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDVolumeSource struct {
+ // A collection of Ceph monitors.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+ // The rados image name.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+ // The rados pool name.
+ // Default is rbd.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.
+ RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
+ // The rados user name.
+ // Default is admin.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
+ // Keyring is the path to key ring for RBDUser.
+ // Default is /etc/ceph/keyring.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
+ // SecretRef is name of the authentication secret for RBDUser. If provided
+ // overrides keyring.
+ // Default is nil.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
+}
+
+// Represents a Cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+type CinderVolumeSource struct {
+ // Volume ID used to identify the volume in Cinder.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+ // Optional: User is the rados user name, default is admin
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
+ // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+ // Required: the volume name. This is going to be stored as metadata -> name on the payload for Flocker.
+ DatasetName string `json:"datasetName" protobuf:"bytes,1,opt,name=datasetName"`
+}
+
+// StorageMedium defines ways that storage can be allocated to a volume.
+type StorageMedium string
+
+const (
+ StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
+ StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
+)
+
+// Protocol defines network protocols supported for things like container ports.
+type Protocol string
+
+const (
+ // ProtocolTCP is the TCP protocol.
+ ProtocolTCP Protocol = "TCP"
+ // ProtocolUDP is the UDP protocol.
+ ProtocolUDP Protocol = "UDP"
+)
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+type GCEPersistentDiskVolumeSource struct {
+ // Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
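+// Illustrative sketch (not part of the upstream API): a read-only GCE PD
+// source mounting partition 1 of the disk, using the /dev/sda1-style
+// numbering described above. The disk name is hypothetical.
+func exampleGCEPersistentDiskSource() GCEPersistentDiskVolumeSource {
+	return GCEPersistentDiskVolumeSource{
+		PDName:    "example-disk",
+		FSType:    "ext4",
+		Partition: 1,
+		ReadOnly:  true,
+	}
+}
+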
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.
+type FlexVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+ // Optional: Extra command options if any.
+ Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
+}
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+type AWSElasticBlockStoreVolumeSource struct {
+ // Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+ // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
+ // If omitted, the default is "false".
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+type GitRepoVolumeSource struct {
+ // Repository URL
+ Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
+ // Commit hash for the specified revision.
+ Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+ // Target directory name.
+ // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ // git repository. Otherwise, if specified, the volume will contain the git repository in
+ // the subdirectory with the given name.
+ Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+ // Name of the secret in the pod's namespace to use.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
+ SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error. Paths must be relative and may not contain
+ // the '..' path or start with '..'.
+ Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+type NFSVolumeSource struct {
+ // Server is the hostname or IP address of the NFS server.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
+
+ // Path that is exported by the NFS server.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+ // ReadOnly here will force
+ // the NFS export to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+ // iSCSI target portal. The portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
+ // Target iSCSI Qualified Name.
+ IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
+ // iSCSI target lun number.
+ Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
+ // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+ ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+ // Required: FC target world wide names (WWNs)
+ TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"`
+ // Required: FC target lun number
+ Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+ // The name of the secret that contains the Azure Storage Account Name and Key.
+ SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
+ // Share Name
+ ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+ // Path that identifies vSphere volume vmdk
+ VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+ LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error. Paths must be relative and may not contain
+ // the '..' path or start with '..'.
+ Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+ // The key to project.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+}
+
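+// Illustrative sketch (not part of the upstream API): a SecretVolumeSource
+// that projects a single key of the secret to a relative path via Items, as
+// described above. Secret name, key, and path are hypothetical.
+func exampleSecretVolumeSource() SecretVolumeSource {
+	return SecretVolumeSource{
+		SecretName: "example-secret",
+		Items: []KeyToPath{
+			{Key: "tls.crt", Path: "certs/tls.crt"},
+		},
+	}
+}
+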
+// ContainerPort represents a network port in a single container.
+type ContainerPort struct {
+ // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ // named port in a pod must have a unique name. Name for the port that can be
+ // referred to by services.
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+ // Number of the port to expose on the host.
+ // If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // Most containers do not need this.
+ HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
+ // Number of the port to expose on the pod's IP address.
+ // This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
+ // Protocol for port. Must be UDP or TCP.
+ // Defaults to "TCP".
+ Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
+ // What host IP to bind the external port to.
+ HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
+}
+
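+// Illustrative sketch (not part of the upstream API): a named container port.
+// The name must be an IANA_SVC_NAME so Services can refer to it; HostPort is
+// left unset because most containers do not need one. Values are hypothetical.
+func exampleContainerPort() ContainerPort {
+	return ContainerPort{
+		Name:          "http",
+		ContainerPort: 8080,
+		Protocol:      ProtocolTCP,
+	}
+}
+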
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+ // This must match the Name of a Volume.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Mounted read-only if true, read-write otherwise (false or unspecified).
+ // Defaults to false.
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+ // Path within the container at which the volume should be mounted. Must
+ // not contain ':'.
+ MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
+}
+
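+// Illustrative sketch (not part of the upstream API): a VolumeMount whose Name
+// matches the "scratch" Volume from the sketch earlier in this file, mounted
+// read-write at a hypothetical path.
+func exampleScratchMount() VolumeMount {
+	return VolumeMount{
+		Name:      "scratch",
+		MountPath: "/var/cache/example",
+	}
+}
+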
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+ // Name of the environment variable. Must be a C_IDENTIFIER.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // Optional: no more than one of the following may be specified.
+
+ // Variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. The $(VAR_NAME)
+ // syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+ // references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // Defaults to "".
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+ // Source for the environment variable's value. Cannot be used if value is not empty.
+ ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+type EnvVarSource struct {
+ // Selects a field of the pod; only name and namespace are supported.
+ FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
+ // Selects a key of a ConfigMap.
+ ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
+ // Selects a key of a secret in the pod's namespace
+ SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
+}
+
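+// Illustrative sketch (not part of the upstream API): one variable resolved
+// from the pod's metadata via ValueFrom, and one literal value referencing it
+// with the $(VAR_NAME) syntax described above. Writing $$(POD_NAME) instead
+// would escape the reference and leave it unexpanded. Names are hypothetical.
+func exampleEnvVars() []EnvVar {
+	return []EnvVar{
+		{
+			Name: "POD_NAME",
+			ValueFrom: &EnvVarSource{
+				FieldRef: &ObjectFieldSelector{FieldPath: "metadata.name"},
+			},
+		},
+		{
+			Name:  "GREETING",
+			Value: "hello from $(POD_NAME)",
+		},
+	}
+}
+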
+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+ // Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+ // Path of the field to select in the specified API version.
+ FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+type ResourceFieldSelector struct {
+ // Container name: required for volumes, optional for env vars
+ ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
+ // Required: resource to select
+ Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+ // Specifies the output format of the exposed resources, defaults to "1"
+ Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
+}
+
+// Selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+ // The ConfigMap to select from.
+ LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+ // The key to select.
+ Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+ // The name of the secret in the pod's namespace to select from.
+ LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+ // The key of the secret to select from. Must be a valid secret key.
+ Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+}
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+type HTTPHeader struct {
+ // The header field name
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // The header field value
+ Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+ // Path to access on the HTTP server.
+ Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+ // Name or number of the port to access on the container.
+ // Number must be in the range 1 to 65535.
+ // Name must be an IANA_SVC_NAME.
+ Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
+ // Host name to connect to, defaults to the pod IP. You probably want to set
+ // "Host" in httpHeaders instead.
+ Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
+ // Scheme to use for connecting to the host.
+ // Defaults to HTTP.
+ Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
+ // Custom headers to set in the request. HTTP allows repeated headers.
+ HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+type URIScheme string
+
+const (
+ // URISchemeHTTP means that the scheme used will be http://
+ URISchemeHTTP URIScheme = "HTTP"
+ // URISchemeHTTPS means that the scheme used will be https://
+ URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+ // Number or name of the port to access on the container.
+ // Number must be in the range 1 to 65535.
+ // Name must be an IANA_SVC_NAME.
+ Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+ // Command is the command line to execute inside the container; the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+ // not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ // Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+ // The action taken to determine the health of a container
+ Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
+ // Number of seconds after the container has started before liveness probes are initiated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
+ // Number of seconds after which the probe times out.
+ // Defaults to 1 second. Minimum value is 1.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
+ // How often (in seconds) to perform the probe.
+ // Default to 10 seconds. Minimum value is 1.
+ PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // Defaults to 3. Minimum value is 1.
+ FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
+}
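+
+// Illustrative sketch (not part of the upstream API): a liveness probe that
+// execs a command in the container. The command and the timing values are
+// hypothetical; the embedded Handler type is defined further below in this
+// file.
+//
+//    liveness := Probe{
+//        Handler: Handler{
+//            Exec: &ExecAction{Command: []string{"cat", "/tmp/healthy"}},
+//        },
+//        InitialDelaySeconds: 5,
+//        PeriodSeconds:       10,
+//        FailureThreshold:    3,
+//    }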
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+ // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+ PullAlways PullPolicy = "Always"
+ // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
+ PullNever PullPolicy = "Never"
+ // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+ PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// Capability represents a POSIX capabilities type
+type Capability string
+
+// Adds and removes POSIX capabilities from running containers.
+type Capabilities struct {
+ // Added capabilities
+ Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
+ // Removed capabilities
+ Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+ // Limits describes the maximum amount of compute resources allowed.
+ // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications
+ Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value.
+ // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications
+ Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
+}
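+
+// Illustrative sketch (not part of the upstream API): requests and limits for
+// CPU and memory. The ResourceList map type and the ResourceCPU/ResourceMemory
+// constants are assumed to be defined elsewhere in this package, and
+// resource.MustParse comes from the resource package this file imports; the
+// quantities are hypothetical.
+//
+//    res := ResourceRequirements{
+//        Requests: ResourceList{
+//            ResourceCPU:    resource.MustParse("250m"),
+//            ResourceMemory: resource.MustParse("128Mi"),
+//        },
+//        Limits: ResourceList{
+//            ResourceCPU:    resource.MustParse("500m"),
+//            ResourceMemory: resource.MustParse("256Mi"),
+//        },
+//    }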
+
+const (
+ // TerminationMessagePathDefault is the default path at which the termination message of an application running in a container is captured
+ TerminationMessagePathDefault string = "/dev/termination-log"
+)
+
+// A single application container that you want to run within a pod.
+type Container struct {
+ // Name of the container specified as a DNS_LABEL.
+ // Each container in a pod must have a unique name (DNS_LABEL).
+ // Cannot be updated.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Docker image name.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md
+ Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+ // Entrypoint array. Not executed within a shell.
+ // The docker image's ENTRYPOINT is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands
+ Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
+ // Arguments to the entrypoint.
+ // The docker image's CMD is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands
+ Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
+ // Container's working directory.
+ // If not specified, the container runtime's default will be used, which
+ // might be configured in the container image.
+ // Cannot be updated.
+ WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
+ // List of ports to expose from the container. Exposing a port here gives
+ // the system additional information about the network connections a
+ // container uses, but is primarily informational. Not specifying a port here
+ // DOES NOT prevent that port from being exposed. Any port which is
+ // listening on the default "0.0.0.0" address inside a container will be
+ // accessible from the network.
+ // Cannot be updated.
+ Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
+ // List of environment variables to set in the container.
+ // Cannot be updated.
+ Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
+ // Compute Resources required by this container.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
+ Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+ // Pod volumes to mount into the container's filesystem.
+ // Cannot be updated.
+ VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"`
+ // Periodic probe of container liveness.
+ // Container will be restarted if the probe fails.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
+ // Periodic probe of container service readiness.
+ // Container will be removed from service endpoints if the probe fails.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+ ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
+ // Actions that the management system should take in response to container lifecycle events.
+ // Cannot be updated.
+ Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
+ // Optional: Path at which the file to which the container's termination message
+ // will be written is mounted into the container's filesystem.
+ // Message written is intended to be brief final status, such as an assertion failure message.
+ // Defaults to /dev/termination-log.
+ // Cannot be updated.
+ TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
+ // Image pull policy.
+ // One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images
+ ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
+ // Security options the pod should run with.
+ // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md
+ SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
+
+ // Variables for interactive containers; these have very specialized use-cases (e.g. debugging)
+ // and shouldn't be used for general purpose containers.
+
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this
+ // is not set, reads from stdin in the container will always result in EOF.
+ // Default is false.
+ Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
+ // Whether the container runtime should close the stdin channel after it has been opened by
+ // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ // at which time stdin is closed and remains closed until the container is restarted. If this
+ // flag is false, a container process that reads from stdin will never receive an EOF.
+ // Default is false.
+ StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
+ // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ // Default is false.
+ TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
+}
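+
+// Illustrative sketch (not part of the upstream API): a minimal web container
+// with one exposed port and an HTTP liveness probe. The name, image, and port
+// are hypothetical; ContainerPort is defined earlier in this file and
+// intstr.FromInt is assumed from the imported intstr package.
+//
+//    c := Container{
+//        Name:            "web",
+//        Image:           "nginx:1.9",
+//        Ports:           []ContainerPort{{ContainerPort: 80}},
+//        ImagePullPolicy: PullIfNotPresent,
+//        LivenessProbe: &Probe{
+//            Handler: Handler{HTTPGet: &HTTPGetAction{Path: "/", Port: intstr.FromInt(80)}},
+//        },
+//    }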
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+ // One and only one of the following should be specified.
+ // Exec specifies the action to take.
+ Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
+ // HTTPGet specifies the http request to perform.
+ HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
+ // TCPSocket specifies an action involving a TCP port.
+ // TCP hooks not yet supported
+ // TODO: implement a realistic TCP lifecycle hook
+ TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+ // PostStart is called immediately after a container is created. If the handler fails,
+ // the container is terminated and restarted according to its restart policy.
+ // Other management of the container blocks until the hook completes.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details
+ PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
+ // PreStop is called immediately before a container is terminated.
+ // The container is terminated after the handler completes.
+ // The reason for termination is passed to the handler.
+ // Regardless of the outcome of the handler, the container is eventually terminated.
+ // Other management of the container blocks until the hook completes.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details
+ PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+}
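+
+// Illustrative sketch (not part of the upstream API): a PreStop hook that asks
+// the container's server to shut down gracefully. The command is hypothetical.
+//
+//    lc := Lifecycle{
+//        PreStop: &Handler{
+//            Exec: &ExecAction{Command: []string{"/bin/sh", "-c", "nginx -s quit"}},
+//        },
+//    }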
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ContainerStateWaiting is a waiting state of a container.
+type ContainerStateWaiting struct {
+ // (brief) reason the container is not yet running.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
+ // Message regarding why the container is not yet running.
+ Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
+
+// ContainerStateRunning is a running state of a container.
+type ContainerStateRunning struct {
+ // Time at which the container was last (re-)started
+ StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+type ContainerStateTerminated struct {
+ // Exit status from the last termination of the container
+ ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
+ // Signal from the last termination of the container
+ Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
+ // (brief) reason from the last termination of the container
+ Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+ // Message regarding the last termination of the container
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+ // Time at which previous execution of the container started
+ StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
+ // Time at which the container last terminated
+ FinishedAt unversioned.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
+ // Container's ID in the format 'docker://<container_id>'
+ ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
+}
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+ // Details about a waiting container
+ Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
+ // Details about a running container
+ Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
+ // Details about a terminated container
+ Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
+}
+
+// ContainerStatus contains details for the current status of this container.
+type ContainerStatus struct {
+ // This must be a DNS_LABEL. Each container in a pod must have a unique name.
+ // Cannot be updated.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Details about the container's current condition.
+ State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
+ // Details about the container's last termination condition.
+ LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
+ // Specifies whether the container has passed its readiness probe.
+ Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
+ // The number of times the container has been restarted, currently based on
+ // the number of dead containers that have not yet been removed.
+ // Note that this is calculated from dead containers, but those containers are subject to
+ // garbage collection, so this value will get capped at 5 by GC.
+ RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
+ // The image the container is running.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md
+ // TODO(dchen1107): Which image the container is running with?
+ Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
+ // ImageID of the container's image.
+ ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
+ // Container's ID in the format 'docker://<container_id>'.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information
+ ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+ // PodPending means the pod has been accepted by the system, but one or more of the containers
+ // has not been started. This includes time before being bound to a node, as well as time spent
+ // pulling images onto the host.
+ PodPending PodPhase = "Pending"
+ // PodRunning means the pod has been bound to a node and all of the containers have been started.
+ // At least one container is still running or is in the process of being restarted.
+ PodRunning PodPhase = "Running"
+ // PodSucceeded means that all containers in the pod have voluntarily terminated
+ // with a container exit code of 0, and the system is not going to restart any of these containers.
+ PodSucceeded PodPhase = "Succeeded"
+ // PodFailed means that all containers in the pod have terminated, and at least one container has
+ // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+ PodFailed PodPhase = "Failed"
+ // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+ // to an error in communicating with the host of the pod.
+ PodUnknown PodPhase = "Unknown"
+)
+
+// PodConditionType is a valid value for PodCondition.Type
+type PodConditionType string
+
+// These are valid conditions of pod.
+const (
+ // PodScheduled represents status of the scheduling process for this pod.
+ PodScheduled PodConditionType = "PodScheduled"
+ // PodReady means the pod is able to service requests and should be added to the
+ // load balancing pools of all matching services.
+ PodReady PodConditionType = "Ready"
+)
+
+// PodCondition contains details for the current condition of this pod.
+type PodCondition struct {
+ // Type is the type of the condition.
+ // Currently only Ready.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+ Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time we probed the condition.
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human-readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+ RestartPolicyAlways RestartPolicy = "Always"
+ RestartPolicyOnFailure RestartPolicy = "OnFailure"
+ RestartPolicyNever RestartPolicy = "Never"
+)
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first, if it is available, then fall back on the default (as
+ // determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+
+ DefaultTerminationGracePeriodSeconds = 30
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects.
+type NodeSelectorTerm struct {
+ // Required. A list of node selector requirements. The requirements are ANDed.
+ MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+ NodeSelectorOpIn NodeSelectorOperator = "In"
+ NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
+ NodeSelectorOpExists NodeSelectorOperator = "Exists"
+ NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+ NodeSelectorOpGt NodeSelectorOperator = "Gt"
+ NodeSelectorOpLt NodeSelectorOperator = "Lt"
+)
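+
+// Illustrative sketch (not part of the upstream API): a node selector that
+// requires nodes labeled disktype=ssd. The label key and value are
+// hypothetical.
+//
+//    ns := NodeSelector{
+//        NodeSelectorTerms: []NodeSelectorTerm{{
+//            MatchExpressions: []NodeSelectorRequirement{{
+//                Key:      "disktype",
+//                Operator: NodeSelectorOpIn,
+//                Values:   []string{"ssd"},
+//            }},
+//        }},
+//    }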
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+ // Describes node affinity scheduling rules for the pod.
+ NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
+ // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
+ // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
+}
+
+// Pod affinity is a group of inter pod affinity scheduling rules.
+type PodAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+type PodAntiAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+type WeightedPodAffinityTerm struct {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+ // Required. A pod affinity term, associated with the corresponding weight.
+ PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+ // A label query over a set of resources, in this case pods.
+ LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // nil list means "this pod's namespace," empty list means "all namespaces"
+ // The json tag here is not "omitempty" since we need to distinguish nil and empty.
+ // See https://golang.org/pkg/encoding/json/#Marshal for more details.
+ Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
+ // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
+ // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
+ TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"`
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // will try to eventually evict the pod from its node.
+ // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // may or may not try to eventually evict the pod from its node.
+ RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node matches the corresponding matchExpressions; the
+ // node(s) with the highest sum are the most preferred.
+ PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+ // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+ // A node selector term, associated with the corresponding weight.
+ Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
+}
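+
+// Illustrative sketch (not part of the upstream API): an Affinity that prefers
+// nodes in a given zone and requires that replicas of a hypothetical "web" app
+// not share a node. The label keys and values are placeholders;
+// "kubernetes.io/hostname" is used as a typical topology key, and
+// unversioned.LabelSelector comes from the unversioned package this file
+// imports.
+//
+//    aff := Affinity{
+//        NodeAffinity: &NodeAffinity{
+//            PreferredDuringSchedulingIgnoredDuringExecution: []PreferredSchedulingTerm{{
+//                Weight: 10,
+//                Preference: NodeSelectorTerm{
+//                    MatchExpressions: []NodeSelectorRequirement{{
+//                        Key:      "zone",
+//                        Operator: NodeSelectorOpIn,
+//                        Values:   []string{"us-east-1a"},
+//                    }},
+//                },
+//            }},
+//        },
+//        PodAntiAffinity: &PodAntiAffinity{
+//            RequiredDuringSchedulingIgnoredDuringExecution: []PodAffinityTerm{{
+//                LabelSelector: &unversioned.LabelSelector{
+//                    MatchLabels: map[string]string{"app": "web"},
+//                },
+//                TopologyKey: "kubernetes.io/hostname",
+//            }},
+//        },
+//    }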
+
+// The node this Taint is attached to has the effect "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+ // Required. The taint key to be applied to a node.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // Required. The taint value corresponding to the taint key.
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule and PreferNoSchedule.
+ Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
+}
+
+type TaintEffect string
+
+const (
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // but allow all pods submitted to Kubelet without going through the scheduler
+ // to start, and allow all already-running pods to continue running.
+ // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule"
+ // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+ // new pods onto the node, rather than prohibiting new pods from scheduling
+ // onto the node entirely. Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // do not allow pods to start on Kubelet unless they tolerate the taint,
+ // but allow all already-running pods to continue running.
+ // Enforced by the scheduler and Kubelet.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // do not allow pods to start on Kubelet unless they tolerate the taint,
+ // and evict any already-running pods that do not tolerate the taint.
+ // Enforced by the scheduler and Kubelet.
+ // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+ // Required. Key is the taint key that the toleration applies to.
+ Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and PreferNoSchedule.
+ Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
+ // TODO: For forgiveness (#1574), we'd eventually add at least a grace period
+ // here, and possibly an occurrence threshold and period.
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+ TolerationOpExists TolerationOperator = "Exists"
+ TolerationOpEqual TolerationOperator = "Equal"
+)
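+
+// Illustrative sketch (not part of the upstream API): a toleration matching a
+// dedicated=batch:NoSchedule taint. The key and value are hypothetical.
+//
+//    tol := Toleration{
+//        Key:      "dedicated",
+//        Operator: TolerationOpEqual,
+//        Value:    "batch",
+//        Effect:   TaintEffectNoSchedule,
+//    }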
+
+const (
+ // This annotation key will be used to contain an array of v1 JSON encoded Containers
+ // for init containers. The annotation will be placed into the internal type and cleared.
+ PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers"
+ // This annotation key will be used to contain an array of v1 JSON encoded
+ // ContainerStatuses for init containers. The annotation will be placed into the internal
+ // type and cleared.
+ PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses"
+)
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+ // List of volumes that can be mounted by containers belonging to the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md
+ Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+ // List of initialization containers belonging to the pod.
+ // Init containers are executed in order prior to containers being started. If any
+ // init container fails, the pod is considered to have failed and is handled according
+ // to its restartPolicy. The name for an init container or normal container must be
+ // unique among all containers.
+ // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
+ // The resourceRequirements of an init container are taken into account during scheduling
+ // by finding the highest request/limit for each resource type, and then using the max of
+ // that value or the sum of the normal containers. Limits are applied to init containers
+ // in a similar fashion.
+ // Init containers cannot currently be added or removed.
+ // Init containers are in alpha state and may change without notice.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md
+ InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"`
+ // List of containers belonging to the pod.
+ // Containers cannot currently be added or removed.
+ // There must be at least one container in a Pod.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md
+ Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
+ // Restart policy for all containers within the pod.
+ // One of Always, OnFailure, Never.
+ // Default to Always.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy
+ RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ // Value must be non-negative integer. The value zero indicates delete immediately.
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds between the time the processes running in the pod
+ // are sent a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // Defaults to 30 seconds.
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
+ // Optional duration in seconds the pod may be active on the node relative to
+ // StartTime before the system will actively try to mark it failed and kill associated containers.
+ // Value must be a positive integer.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
+ // Set DNS policy for containers within the pod.
+ // One of 'ClusterFirst' or 'Default'.
+ // Defaults to "ClusterFirst".
+ DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
+ // NodeSelector is a selector which must be true for the pod to fit on a node.
+ // Selector which must match a node's labels for the pod to be scheduled on that node.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md
+ NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md
+ ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
+ // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ // Deprecated: Use serviceAccountName instead.
+ // +k8s:conversion-gen=false
+ DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
+ // Host networking requested for this pod. Use the host's network namespace.
+ // If this option is set, the ports that will be used must be specified.
+ // Default to false.
+ // +k8s:conversion-gen=false
+ HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
+ // Use the host's pid namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
+ // Use the host's ipc namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+ // in the case of docker, only DockerConfig type secrets are honored.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod
+ ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
+ // Specifies the hostname of the Pod
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
+ // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domain name at all.
+ Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
+}
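+
+// Illustrative sketch (not part of the upstream API): a minimal PodSpec with a
+// single container and the documented defaults spelled out. The container name
+// and image are hypothetical.
+//
+//    grace := int64(DefaultTerminationGracePeriodSeconds)
+//    spec := PodSpec{
+//        Containers:                    []Container{{Name: "web", Image: "nginx:1.9"}},
+//        RestartPolicy:                 RestartPolicyAlways,
+//        DNSPolicy:                     DNSClusterFirst,
+//        TerminationGracePeriodSeconds: &grace,
+//    }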
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+ // The SELinux context to be applied to all containers.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in SecurityContext. If set in
+ // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ // takes precedence for that container.
+ SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
+ // A list of groups applied to the first process run in each container, in addition
+ // to the container's primary GID. If unspecified, no groups will be added to
+ // any container.
+ SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
+ // A special supplemental group that applies to all containers in a pod.
+ // Some volume types allow the Kubelet to change the ownership of that volume
+ // to be owned by the pod:
+ //
+ // 1. The owning GID will be the FSGroup
+ // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ // 3. The permission bits are OR'd with rw-rw----
+ //
+ // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+type PodStatus struct {
+ // Current condition of the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase
+ Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
+ // Current service state of pod.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+ Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+ // A human readable message indicating details about why the pod is in this condition.
+ Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+ // A brief CamelCase message indicating details about why the pod is in this state.
+ // e.g. 'OutOfDisk'
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+
+ // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+ HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
+ // IP address allocated to the pod. Routable at least within the cluster.
+ // Empty if not yet allocated.
+ PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
+
+ // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
+
+ // The list has one entry per init container in the manifest. The most recent successful
+ // init container will have ready = true, the most recently started container will have
+ // startTime set.
+ // Init containers are in alpha state and may change without notice.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+ InitContainerStatuses []ContainerStatus `json:"-"`
+ // The list has one entry per container in the manifest. Each entry is currently the output
+ // of `docker inspect`.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+ ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
+}
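+
+// Illustrative sketch (not part of the upstream API): a helper that reads the
+// Ready condition out of a PodStatus. The function name is hypothetical.
+//
+//    // podReady reports whether the PodReady condition is currently True.
+//    func podReady(status PodStatus) bool {
+//        for _, c := range status.Conditions {
+//            if c.Type == PodReady && c.Status == ConditionTrue {
+//                return true
+//            }
+//        }
+//        return false
+//    }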
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+type PodStatusResult struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Most recently observed status of the pod.
+ // This data may not be up to date.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// +genclient=true
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+type Pod struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Most recently observed status of the pod.
+ // This data may not be up to date.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodList is a list of Pods.
+type PodList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of pods.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md
+ Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the pod.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +genclient=true
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Template defines the pods that will be created from this pod template.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+}
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of pod templates
+ Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+type ReplicationControllerSpec struct {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Selector is a label query over pods that should match the Replicas count.
+ // If Selector is empty, it is defaulted to the labels present on the Pod template.
+ // Label keys and values that must match in order to be controlled by this replication
+ // controller, if empty defaulted to labels on Pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+ // TemplateRef is a reference to an object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // TemplateRef *ObjectReference `json:"templateRef,omitempty"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. This takes precedence over a TemplateRef.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
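+
+// Illustrative sketch (not part of the upstream API): a spec for three
+// replicas of a hypothetical "web" pod. Replicas is a pointer so that an
+// explicit zero can be distinguished from an unset value.
+//
+//    replicas := int32(3)
+//    rcSpec := ReplicationControllerSpec{
+//        Replicas: &replicas,
+//        Selector: map[string]string{"app": "web"},
+//        Template: &PodTemplateSpec{
+//            ObjectMeta: ObjectMeta{Labels: map[string]string{"app": "web"}},
+//            Spec:       PodSpec{Containers: []Container{{Name: "web", Image: "nginx:1.9"}}},
+//        },
+//    }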
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+ // Replicas is the most recently observed number of replicas.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+ // ObservedGeneration reflects the generation of the most recently observed replication controller.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+}
+
+// +genclient=true
+
+// ReplicationController represents the configuration of a replication controller.
+type ReplicationController struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // If the Labels of a ReplicationController are empty, they are defaulted to
+ // be the same as the Pod(s) that the replication controller manages.
+ // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the specification of the desired behavior of the replication controller.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the most recently observed status of the replication controller.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of replication controllers.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
+ Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ServiceAffinity is the session affinity type of a service.
+type ServiceAffinity string
+
+const (
+ // ServiceAffinityClientIP enables client-IP-based session affinity.
+ ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+ // ServiceAffinityNone - no session affinity.
+ ServiceAffinityNone ServiceAffinity = "None"
+)
+
+// ServiceType describes the ingress methods for a service.
+type ServiceType string
+
+const (
+ // ServiceTypeClusterIP means a service will only be accessible inside the
+ // cluster, via the cluster IP.
+ ServiceTypeClusterIP ServiceType = "ClusterIP"
+
+ // ServiceTypeNodePort means a service will be exposed on one port of
+ // every node, in addition to 'ClusterIP' type.
+ ServiceTypeNodePort ServiceType = "NodePort"
+
+ // ServiceTypeLoadBalancer means a service will be exposed via an
+ // external load balancer (if the cloud provider supports it), in addition
+ // to 'NodePort' type.
+ ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
+)
+
+// ServiceStatus represents the current status of a service.
+type ServiceStatus struct {
+ // LoadBalancer contains the current status of the load-balancer,
+ // if one is present.
+ LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
+}
+
+// LoadBalancerStatus represents the status of a load-balancer.
+type LoadBalancerStatus struct {
+ // Ingress is a list containing ingress points for the load-balancer.
+ // Traffic intended for the service should be sent to these ingress points.
+ Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+type LoadBalancerIngress struct {
+ // IP is set for load-balancer ingress points that are IP based
+ // (typically GCE or OpenStack load-balancers)
+ IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
+
+ // Hostname is set for load-balancer ingress points that are DNS based
+ // (typically AWS load-balancers)
+ Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+type ServiceSpec struct {
+ // The list of ports that are exposed by this service.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+ Ports []ServicePort `json:"ports" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
+
+ // This service routes traffic to pods whose labels match this selector.
+ // If not specified, endpoints must be manually specified and the system will not
+ // automatically manage them.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview
+ Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+ // ClusterIP is usually assigned by the master and is the IP address of the service.
+ // If specified, it will be allocated to the service if it is unused
+ // or else creation of the service will fail.
+ // Valid values are None, empty string (""), or a valid IP address.
+ // 'None' can be specified for a headless service when proxying is not required.
+ // Cannot be updated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+ ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
+
+ // Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer.
+ // Defaults to ClusterIP.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services
+ Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
+
+ // externalIPs is a list of IP addresses for which nodes in the cluster
+ // will also accept traffic for this service. These IPs are not managed by
+ // Kubernetes. The user is responsible for ensuring that traffic arrives
+ // at a node with this IP. A common example is external load-balancers
+ // that are not part of the Kubernetes system. A previous form of this
+ // functionality exists as the deprecatedPublicIPs field. When using this
+ // field, callers should also clear the deprecatedPublicIPs field.
+ ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
+
+ // deprecatedPublicIPs is deprecated and replaced by the externalIPs field
+ // with almost the exact same semantics. This field is retained in the v1
+ // API for compatibility until at least 8/20/2016. It will be removed from
+ // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are
+ // set, deprecatedPublicIPs is used.
+ // +k8s:conversion-gen=false
+ DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" protobuf:"bytes,6,rep,name=deprecatedPublicIPs"`
+
+ // Supports "ClientIP" and "None". Used to maintain session affinity.
+ // Enable client IP based session affinity.
+ // Must be ClientIP or None.
+ // Defaults to None.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+ SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
+
+ // Only applies to Service Type: LoadBalancer
+ // LoadBalancer will get created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
+
+ // If specified and supported by the platform, traffic through the cloud-provider
+ // load-balancer will be restricted to the specified client IPs. This field will be
+ // ignored if the cloud-provider does not support the feature.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md
+ LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
+}
+
+// ServicePort contains information on service's port.
+type ServicePort struct {
+ // The name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. This maps to
+ // the 'Name' field in EndpointPort objects.
+ // Optional if only one ServicePort is defined on this service.
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // The IP protocol for this port. Supports "TCP" and "UDP".
+ // Default is TCP.
+ Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
+
+ // The port that will be exposed by this service.
+ Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
+
+ // Number or name of the port to access on the pods targeted by the service.
+ // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ // If this is a string, it will be looked up as a named port in the
+ // target Pod's container ports. If this is not specified, the value
+ // of the 'port' field is used (an identity map).
+ // This field is ignored for services with clusterIP=None, and should be
+ // omitted or set equal to the 'port' field.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service
+ TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
+
+ // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+ // Usually assigned by the system. If specified, it will be allocated to the service
+ // if unused or else creation of the service will fail.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport
+ NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
+}
+
+// +genclient=true
+
+// Service is a named abstraction of a software service (for example, mysql),
+// consisting of a local port (for example 3306) that the proxy listens on, and the
+// selector that determines which pods will answer requests sent through the proxy.
+type Service struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the behavior of a service.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Most recently observed status of the service.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
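+
+// exampleWebService is an illustrative sketch added for this review, not part of
+// the upstream API: it shows how Selector, Ports, TargetPort and Type fit together
+// for a simple ClusterIP service. The "web" names and port numbers are hypothetical;
+// ProtocolTCP and intstr.FromInt come from this package and its vendored intstr
+// dependency, which this file already imports.
+var exampleWebService = Service{
+	ObjectMeta: ObjectMeta{Name: "web", Namespace: "default"},
+	Spec: ServiceSpec{
+		// Route traffic to pods labeled app=web.
+		Selector: map[string]string{"app": "web"},
+		Ports: []ServicePort{{
+			Name:       "http",
+			Protocol:   ProtocolTCP,
+			Port:       80,                   // port exposed by the service
+			TargetPort: intstr.FromInt(8080), // port on the selected pods
+		}},
+		// ClusterIP is the default; shown explicitly for clarity.
+		Type: ServiceTypeClusterIP,
+	},
+}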
+
+const (
+ // ClusterIPNone - do not assign a cluster IP
+ // no proxying required and no environment variables should be created for pods
+ ClusterIPNone = "None"
+)
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of services
+ Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Secrets is the list of secrets allowed to be used by pods that run using this ServiceAccount.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md
+ Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
+
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret
+ ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
+}
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of ServiceAccounts.
+ // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts
+ Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+// Name: "mysvc",
+// Subsets: [
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// },
+// {
+// Addresses: [{"ip": "10.10.3.3"}],
+// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+// },
+// ]
+type Endpoints struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The set of all endpoints is the union of all subsets. Addresses are placed into
+ // subsets according to the IPs they share. A single address with multiple ports,
+ // some of which are ready and some of which are not (because they come from
+ // different containers) will result in the address being displayed in different
+ // subsets for the different ports. No address will appear in both Addresses and
+ // NotReadyAddresses in the same subset.
+ // Sets of addresses and ports that comprise a service.
+ Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"`
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// }
+// The resulting set of endpoints can be viewed as:
+// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+ // IP addresses which offer the related ports that are marked as ready. These endpoints
+ // should be considered safe for load balancers and clients to utilize.
+ Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
+ // IP addresses which offer the related ports but are not currently marked as ready
+ // because they have not yet finished starting, have recently failed a readiness check,
+ // or have recently failed a liveness check.
+ NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
+ // Port numbers available on the related IP addresses.
+ Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
+}
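+
+// The helper below is an illustrative sketch added for this review, not part of the
+// upstream API: it makes the Cartesian product described above concrete by pairing
+// every ready address with every port in a subset. The hostPort type and the
+// expandSubset name are hypothetical.
+type hostPort struct {
+	IP   string
+	Port int32
+}
+
+func expandSubset(s EndpointSubset) []hostPort {
+	var pairs []hostPort
+	for _, p := range s.Ports {
+		for _, a := range s.Addresses {
+			// Only ready addresses are expanded; NotReadyAddresses are skipped.
+			pairs = append(pairs, hostPort{IP: a.IP, Port: p.Port})
+		}
+	}
+	return pairs
+}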
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+ // The IP of this endpoint.
+ // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
+ // or link-local multicast (224.0.0.0/24).
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, See #4447.
+ IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+ // The Hostname of this endpoint
+ Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
+ // Reference to object providing the endpoint.
+ TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+ // The name of this port (corresponds to ServicePort.Name).
+ // Must be a DNS_LABEL.
+ // Optional only if one port is defined.
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // The port number of the endpoint.
+ Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
+
+ // The IP protocol for this port.
+ // Must be UDP or TCP.
+ // Default is TCP.
+ Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
+}
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of endpoints.
+ Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+ // PodCIDR represents the pod IP range assigned to the node.
+ PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
+ // External ID of the node assigned by some machine database (e.g. a cloud provider).
+ // Deprecated.
+ ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
+ // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+ ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
+ // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration
+ Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+type DaemonEndpoint struct {
+ /*
+ The port tag was not properly in quotes in earlier releases, so it must be
+ uppercased for backwards compat (since it was falling back to var name of
+ 'Port').
+ */
+
+ // Port number of the given endpoint.
+ Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+type NodeDaemonEndpoints struct {
+ // Endpoint on which Kubelet is listening.
+ KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+type NodeSystemInfo struct {
+ // Machine ID reported by the node.
+ MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
+ // System UUID reported by the node.
+ SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
+ // Boot ID reported by the node.
+ BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
+ // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+ KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
+ // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+ OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
+ // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
+ ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
+ // Kubelet Version reported by the node.
+ KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
+ // KubeProxy Version reported by the node.
+ KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
+ // The Operating System reported by the node
+ OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
+ // The Architecture reported by the node
+ Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
+}
+
+// NodeStatus is information about the current status of a node.
+type NodeStatus struct {
+ // Capacity represents the total resources of a node.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity
+ Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+ // Allocatable represents the resources of a node that are available for scheduling.
+ // Defaults to Capacity.
+ Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
+ // NodePhase is the recently observed lifecycle phase of the node.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase
+ Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
+ // Conditions is an array of current observed node conditions.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition
+ Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+ // List of addresses at which the node is reachable.
+ // Queried from cloud provider, if available.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses
+ Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
+ // Endpoints of daemons running on the Node.
+ DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
+ // Set of ids/uuids to uniquely identify the node.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info
+ NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
+ // List of container images on this node
+ Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
+ // List of attachable volumes in use (mounted) by the node.
+ VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
+ // List of volumes that are attached to the node.
+ VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
+}
+
+type UniqueVolumeName string
+
+// AttachedVolume describes a volume attached to a node
+type AttachedVolume struct {
+ // Name of the attached volume
+ Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
+
+ // DevicePath represents the device path where the volume should be available
+ DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
+}
+
+// ContainerImage describes a container image.
+type ContainerImage struct {
+ // Names by which this image is known.
+ // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+ Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
+ // The size of the image in bytes.
+ SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
+}
+
+type NodePhase string
+
+// These are the valid phases of a node.
+const (
+ // NodePending means the node has been created/added by the system, but not configured.
+ NodePending NodePhase = "Pending"
+ // NodeRunning means the node has been configured and has Kubernetes components running.
+ NodeRunning NodePhase = "Running"
+ // NodeTerminated means the node has been removed from the cluster.
+ NodeTerminated NodePhase = "Terminated"
+)
+
+type NodeConditionType string
+
+// These are valid conditions of node. Currently, we don't have enough information to decide
+// node condition. In the future, we will add more. The proposed set of conditions are:
+// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
+const (
+ // NodeReady means kubelet is healthy and ready to accept pods.
+ NodeReady NodeConditionType = "Ready"
+ // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
+ // space on the node.
+ NodeOutOfDisk NodeConditionType = "OutOfDisk"
+ // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
+ NodeMemoryPressure NodeConditionType = "MemoryPressure"
+ // NodeNetworkUnavailable means that network for the node is not correctly configured.
+ NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+)
+
+// NodeCondition contains condition information for a node.
+type NodeCondition struct {
+ // Type of node condition.
+ Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time we got an update on a given condition.
+ LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+type NodeAddressType string
+
+// These are valid address types of a node.
+const (
+ NodeHostName NodeAddressType = "Hostname"
+ NodeExternalIP NodeAddressType = "ExternalIP"
+ NodeInternalIP NodeAddressType = "InternalIP"
+)
+
+// NodeAddress contains information for the node's address.
+type NodeAddress struct {
+ // Node address type, one of Hostname, ExternalIP or InternalIP.
+ Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
+ // The node address.
+ Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
+}
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+ // CPU, in cores. (500m = .5 cores)
+ ResourceCPU ResourceName = "cpu"
+ // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceMemory ResourceName = "memory"
+ // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+ ResourceStorage ResourceName = "storage"
+ // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
+ ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
+ // Number of Pods that may be running on this Node: see ResourcePods
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
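+
+// exampleCapacity is an illustrative sketch added for this review, not part of the
+// upstream API: it shows the unit conventions documented above ("500m" CPU is half
+// a core, "2Gi" is 2 * 1024 * 1024 * 1024 bytes). resource.MustParse comes from the
+// vendored resource package this file already imports; all values are hypothetical.
+var exampleCapacity = ResourceList{
+	ResourceCPU:    resource.MustParse("500m"),
+	ResourceMemory: resource.MustParse("2Gi"),
+	ResourcePods:   resource.MustParse("110"),
+}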
+
+// +genclient=true
+// +nonNamespaced=true
+
+// Node is a worker node in Kubernetes, formerly known as minion.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
+type Node struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the behavior of a node.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Most recently observed status of the node.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// NodeList is the list of all Nodes that have been registered with the master.
+type NodeList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of nodes
+ Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+type FinalizerName string
+
+// These are the finalizer values internal to Kubernetes; finalizers must be qualified names unless defined here
+const (
+ FinalizerKubernetes FinalizerName = "kubernetes"
+)
+
+// NamespaceSpec describes the attributes on a Namespace.
+type NamespaceSpec struct {
+ // Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
+ // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers
+ Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
+}
+
+// NamespaceStatus is information about the current status of a Namespace.
+type NamespaceStatus struct {
+ // Phase is the current lifecycle phase of the namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases
+ Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
+}
+
+type NamespacePhase string
+
+// These are the valid phases of a namespace.
+const (
+ // NamespaceActive means the namespace is available for use in the system
+ NamespaceActive NamespacePhase = "Active"
+ // NamespaceTerminating means the namespace is undergoing graceful termination
+ NamespaceTerminating NamespacePhase = "Terminating"
+)
+
+// +genclient=true
+// +nonNamespaced=true
+
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+type Namespace struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the behavior of the Namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status describes the current status of a Namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// NamespaceList is a list of Namespaces.
+type NamespaceList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Namespace objects in the list.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+ Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Binding ties one object to another.
+// For example, a pod is bound to a node by a scheduler.
+type Binding struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The target object that you want to bind to the standard object.
+ Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+ // Specifies the target UID.
+ UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+}
+
+// DeleteOptions may be provided when deleting an API object
+type DeleteOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // The duration in seconds before the object should be deleted. Value must be a
+ // non-negative integer. The value zero indicates delete immediately. If this value
+ // is nil, the default grace period for the specified type will be used.
+ GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
+
+ // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+ // returned.
+ Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
+
+ // Should the dependent objects be orphaned. If true/false, the "orphan"
+ // finalizer will be added to/removed from the object's finalizers list.
+ OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
+}
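+
+// The values below are an illustrative sketch added for this review, not part of
+// the upstream API: they show the pointer fields of DeleteOptions in use, requesting
+// a 30-second grace period and orphaning of dependents. The variable names are
+// hypothetical.
+var (
+	exampleGracePeriod int64 = 30
+	exampleOrphan            = true
+	exampleDelete            = DeleteOptions{
+		GracePeriodSeconds: &exampleGracePeriod,
+		OrphanDependents:   &exampleOrphan,
+	}
+)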
+
+// ExportOptions is the query options to the standard REST get call.
+type ExportOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Should this value be exported. Export strips fields that a user cannot specify.
+ Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
+ // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
+ Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
+}
+
+// ListOptions is the query options to a standard REST list call.
+type ListOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // A selector to restrict the list of returned objects by their labels.
+ // Defaults to everything.
+ LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+ // A selector to restrict the list of returned objects by their fields.
+ // Defaults to everything.
+ FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
+ // Watch for changes to the described resources and return them as a stream of
+ // add, update, and remove notifications. Specify resourceVersion.
+ Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
+ // When specified with a watch call, shows changes that occur after that particular version of a resource.
+ // Defaults to changes from the beginning of history.
+ ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+ // Timeout for the list/watch call.
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
+}
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+type PodLogOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // The container for which to stream logs. Defaults to the only container if there is one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+ // Follow the log stream of the pod. Defaults to false.
+ Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+ // Return previous terminated container logs. Defaults to false.
+ Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+ // An RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+ // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+ // If set, the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+ // If set, the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+}
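+
+// examplePodLogs is an illustrative sketch added for this review, not part of the
+// upstream API: it asks for the last 100 lines of a named container and then follows
+// the stream, illustrating that TailLines is a pointer so "unset" and "zero" can be
+// distinguished. The container name is hypothetical.
+var (
+	exampleTailLines int64 = 100
+	examplePodLogs         = PodLogOptions{
+		Container:  "app",
+		Follow:     true,
+		Timestamps: true,
+		TailLines:  &exampleTailLines,
+	}
+)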
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+type PodAttachOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Stdin if true, redirects the standard input stream of the pod for this call.
+ // Defaults to false.
+ Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
+
+ // Stdout if true indicates that stdout is to be redirected for the attach call.
+ // Defaults to true.
+ Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
+
+ // Stderr if true indicates that stderr is to be redirected for the attach call.
+ // Defaults to true.
+ Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
+
+ // TTY if true indicates that a tty will be allocated for the attach call.
+ // This is passed through the container runtime so the tty
+ // is allocated on the worker node by the container runtime.
+ // Defaults to false.
+ TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
+
+ // The container in which to execute the command.
+ // Defaults to the only container if there is only one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
+}
+
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+type PodExecOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Redirect the standard input stream of the pod for this call.
+ // Defaults to false.
+ Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
+
+ // Redirect the standard output stream of the pod for this call.
+ // Defaults to true.
+ Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
+
+ // Redirect the standard error stream of the pod for this call.
+ // Defaults to true.
+ Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
+
+ // TTY if true indicates that a tty will be allocated for the exec call.
+ // Defaults to false.
+ TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
+
+ // Container in which to execute the command.
+ // Defaults to the only container if there is only one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
+
+ // Command is the remote command to execute. argv array. Not executed within a shell.
+ Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
+}
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+type PodProxyOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Path is the URL path to use for the current proxy request to pod.
+ Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+type NodeProxyOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Path is the URL path to use for the current proxy request to node.
+ Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+type ServiceProxyOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Path is the part of URLs that include service endpoints, suffixes,
+ // and parameters to use for the current proxy request to service.
+ // For example, the whole request URL is
+ // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+ // Path is _search?q=user:kimchy.
+ Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// OwnerReference contains enough information to let you identify an owning
+// object. Currently, an owning object must be in the same namespace, so there
+// is no namespace field.
+type OwnerReference struct {
+ // API version of the referent.
+ APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
+ // Kind of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+ // UID of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+ // If true, this reference points to the managing controller.
+ Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+ // Kind of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+ // Namespace of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+ // UID of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+ UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+ // API version of the referent.
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
+ // Specific resourceVersion to which this reference is made, if any.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+ ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+ // If referring to a piece of an object instead of an entire object, this string
+ // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ // For example, if the object reference is to a container within a pod, this would take on a value like:
+ // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ // the event) or if no container name is specified "spec.containers[2]" (container with
+ // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ // referencing a part of an object.
+ // TODO: this design is not final and this field is subject to change in the future.
+ FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
+}
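+
+// exampleContainerRef is an illustrative sketch added for this review, not part of
+// the upstream API: it shows the fieldPath syntax described above, referring to a
+// single container inside a pod. The pod and container names are hypothetical.
+var exampleContainerRef = ObjectReference{
+	Kind:       "Pod",
+	APIVersion: "v1",
+	Namespace:  "default",
+	Name:       "web-0",
+	FieldPath:  "spec.containers{app}",
+}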
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+type LocalObjectReference struct {
+ // Name of the referent.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ // TODO: Add other useful fields. apiVersion, kind, uid?
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+}
+
+// SerializedReference is a reference to a serialized object.
+type SerializedReference struct {
+ unversioned.TypeMeta `json:",inline"`
+ // The reference to an object in the system.
+ Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
+}
+
+// EventSource contains information for an event.
+type EventSource struct {
+ // Component from which the event is generated.
+ Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
+ // Host name on which the event is generated.
+ Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
+}
+
+// Valid values for event types (new types could be added in future)
+const (
+ // Information only and will not cause any problems
+ EventTypeNormal string = "Normal"
+ // These events are to warn that something might go wrong
+ EventTypeWarning string = "Warning"
+)
+
+// +genclient=true
+
+// Event is a report of an event somewhere in the cluster.
+// TODO: Decide whether to store these separately or with the object they apply to.
+type Event struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The object that this event is about.
+ InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
+
+ // This should be a short, machine understandable string that gives the reason
+ // for the transition into the object's current status.
+ // TODO: provide exact specification for format.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+
+ // A human-readable description of the status of this operation.
+ // TODO: decide on maximum length.
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+
+ // The component reporting this event. Should be a short machine understandable string.
+ Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
+
+ // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+ FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
+
+ // The time at which the most recent occurrence of this event was recorded.
+ LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
+
+ // The number of times this event has occurred.
+ Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
+
+ // Type of this event (Normal, Warning), new types could be added in the future
+ Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
+}
+
+// EventList is a list of events.
+type EventList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of events
+ Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of objects
+ Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// LimitType is a type of object that is limited
+type LimitType string
+
+const (
+ // Limit that applies to all pods in a namespace
+ LimitTypePod LimitType = "Pod"
+ // Limit that applies to all containers in a namespace
+ LimitTypeContainer LimitType = "Container"
+)
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+type LimitRangeItem struct {
+ // Type of resource that this limit applies to.
+ Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
+ // Max usage constraints on this kind by resource name.
+ Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
+ // Min usage constraints on this kind by resource name.
+ Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
+ // Default resource requirement limit value by resource name if resource limit is omitted.
+ Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
+ // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+ DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
+ // MaxLimitRequestRatio, if specified, requires that the named resource have a
+ // request and limit that are both non-zero and whose ratio (limit divided by request)
+ // is less than or equal to the enumerated value; this represents the maximum burst
+ // for the named resource.
+ MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+type LimitRangeSpec struct {
+ // Limits is the list of LimitRangeItem objects that are enforced.
+ Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
+}
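+
+// exampleContainerLimits is an illustrative sketch added for this review, not part
+// of the upstream API: it shows one LimitRangeItem that caps containers at 1 CPU and
+// 1Gi, defaults their limit to 500m / 256Mi, and defaults their request to
+// 100m / 128Mi. All quantities are hypothetical.
+var exampleContainerLimits = LimitRangeSpec{
+	Limits: []LimitRangeItem{{
+		Type: LimitTypeContainer,
+		Max: ResourceList{
+			ResourceCPU:    resource.MustParse("1"),
+			ResourceMemory: resource.MustParse("1Gi"),
+		},
+		Default: ResourceList{
+			ResourceCPU:    resource.MustParse("500m"),
+			ResourceMemory: resource.MustParse("256Mi"),
+		},
+		DefaultRequest: ResourceList{
+			ResourceCPU:    resource.MustParse("100m"),
+			ResourceMemory: resource.MustParse("128Mi"),
+		},
+	}},
+}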
+
+// +genclient=true
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+type LimitRange struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the limits enforced.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// LimitRangeList is a list of LimitRange items.
+type LimitRangeList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of LimitRange objects.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md
+ Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// The following constants identify resource names for Kubernetes object types
+const (
+ // Pods, number
+ ResourcePods ResourceName = "pods"
+ // Services, number
+ ResourceServices ResourceName = "services"
+ // ReplicationControllers, number
+ ResourceReplicationControllers ResourceName = "replicationcontrollers"
+ // ResourceQuotas, number
+ ResourceQuotas ResourceName = "resourcequotas"
+ // ResourceSecrets, number
+ ResourceSecrets ResourceName = "secrets"
+ // ResourceConfigMaps, number
+ ResourceConfigMaps ResourceName = "configmaps"
+ // ResourcePersistentVolumeClaims, number
+ ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
+ // ResourceServicesNodePorts, number
+ ResourceServicesNodePorts ResourceName = "services.nodeports"
+ // ResourceServicesLoadBalancers, number
+ ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
+ // CPU request, in cores. (500m = .5 cores)
+ ResourceRequestsCPU ResourceName = "requests.cpu"
+ // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceRequestsMemory ResourceName = "requests.memory"
+ // CPU limit, in cores. (500m = .5 cores)
+ ResourceLimitsCPU ResourceName = "limits.cpu"
+ // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceLimitsMemory ResourceName = "limits.memory"
+)
+
+// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
+type ResourceQuotaScope string
+
+const (
+ // Match all pod objects where spec.activeDeadlineSeconds is set (>= 0)
+ ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
+ // Match all pod objects where spec.activeDeadlineSeconds is nil
+ ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
+ // Match all pod objects that have best effort quality of service
+ ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
+ // Match all pod objects that do not have best effort quality of service
+ ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+)
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+type ResourceQuotaSpec struct {
+ // Hard is the set of desired hard limits for each named resource.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+ Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
+ // A collection of filters that must match each object tracked by a quota.
+ // If not specified, the quota matches all objects.
+ Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
+}
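+
+// exampleQuota is an illustrative sketch added for this review, not part of the
+// upstream API: it combines object-count and compute limits and, via Scopes,
+// restricts the quota to pods that are not best-effort. All quantities are
+// hypothetical.
+var exampleQuota = ResourceQuotaSpec{
+	Hard: ResourceList{
+		ResourcePods:         resource.MustParse("10"),
+		ResourceRequestsCPU:  resource.MustParse("4"),
+		ResourceLimitsMemory: resource.MustParse("8Gi"),
+	},
+	Scopes: []ResourceQuotaScope{ResourceQuotaScopeNotBestEffort},
+}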
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+type ResourceQuotaStatus struct {
+ // Hard is the set of enforced hard limits for each named resource.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+ Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
+ // Used is the current observed total usage of the resource in the namespace.
+ Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
+}
+
+// +genclient=true
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+type ResourceQuota struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired quota.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status defines the actual enforced quota and its current usage.
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ResourceQuotaList is a list of ResourceQuota items.
+type ResourceQuotaList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of ResourceQuota objects.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+ Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+type Secret struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN
+ // or leading dot followed by valid DNS_SUBDOMAIN.
+ // The serialized form of the secret data is a base64 encoded string,
+ // representing the arbitrary (possibly non-string) data value here.
+ // Described in https://tools.ietf.org/html/rfc4648#section-4
+ Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
+
+ // stringData allows specifying non-binary secret data in string form.
+ // It is provided as a write-only convenience field.
+ // All keys and values are merged into the data field on write, overwriting any existing values.
+ // It is never output when reading from the API.
+ // +k8s:conversion-gen=false
+ StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
+
+ // Used to facilitate programmatic handling of secret data.
+ Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
+}
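+
+// exampleSecret is an illustrative sketch added for this review, not part of the
+// upstream API: it shows the two ways of supplying secret data described above.
+// Data carries raw bytes (base64-encoded on the wire), while StringData is a
+// write-only convenience that is merged into Data by the server. Key names and
+// values are hypothetical.
+var exampleSecret = Secret{
+	ObjectMeta: ObjectMeta{Name: "db-credentials", Namespace: "default"},
+	Type:       SecretTypeOpaque,
+	Data: map[string][]byte{
+		"password": []byte("s3cr3t"),
+	},
+	StringData: map[string]string{
+		"username": "admin",
+	},
+}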
+
+const MaxSecretSize = 1 * 1024 * 1024
+
+type SecretType string
+
+const (
+ // SecretTypeOpaque is the default. Arbitrary user-defined data
+ SecretTypeOpaque SecretType = "Opaque"
+
+ // SecretTypeServiceAccountToken contains a token that identifies a service account to the API
+ //
+ // Required fields:
+ // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
+ // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
+ // - Secret.Data["token"] - a token that identifies the service account to the API
+ SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
+
+ // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+ ServiceAccountNameKey = "kubernetes.io/service-account.name"
+ // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+ ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
+ // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
+ ServiceAccountTokenKey = "token"
+ // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
+ ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
+ // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
+ ServiceAccountRootCAKey = "ca.crt"
+ // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
+ ServiceAccountNamespaceKey = "namespace"
+
+ // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
+ //
+ // Required fields:
+ // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
+ SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
+
+ // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
+ DockerConfigKey = ".dockercfg"
+
+ // SecretTypeTLS contains information about a TLS client or server secret. It
+ // is primarily used with TLS termination of the Ingress resource, but may be
+ // used in other types.
+ //
+ // Required fields:
+ // - Secret.Data["tls.key"] - TLS private key.
+ // - Secret.Data["tls.crt"] - TLS certificate.
+ // TODO: Consider supporting different formats, specifying CA/destinationCA.
+ SecretTypeTLS SecretType = "kubernetes.io/tls"
+
+ // TLSCertKey is the key for TLS certificates in a TLS secret.
+ TLSCertKey = "tls.crt"
+ // TLSPrivateKeyKey is the key for the private key field in a TLS secret.
+ TLSPrivateKeyKey = "tls.key"
+)
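+
+// Editor's note: the function below is an illustrative sketch added during
+// review and is not part of the vendored upstream file. It shows how the
+// SecretTypeTLS constant and the well-known TLSCertKey/TLSPrivateKeyKey data
+// keys defined above fit together; the name and PEM contents are placeholders.
+func exampleTLSSecret(certPEM, keyPEM []byte) Secret {
+	return Secret{
+		ObjectMeta: ObjectMeta{Name: "example-tls"},
+		Type:       SecretTypeTLS,
+		Data: map[string][]byte{
+			TLSCertKey:       certPEM, // "tls.crt"
+			TLSPrivateKeyKey: keyPEM,  // "tls.key"
+		},
+	}
+}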
+
+// SecretList is a list of Secret.
+type SecretList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of secret objects.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md
+ Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+
+// ConfigMap holds configuration data for pods to consume.
+type ConfigMap struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Data contains the configuration data.
+ // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.
+ Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
+}
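+
+// Editor's note: the function below is an illustrative sketch added during
+// review and is not part of the vendored upstream file. It shows the shape
+// of the Data map described above; the key and value are placeholders that
+// satisfy the DNS_SUBDOMAIN key rule.
+func exampleConfigMap() ConfigMap {
+	return ConfigMap{
+		ObjectMeta: ObjectMeta{Name: "example-config"},
+		Data:       map[string]string{"logging.level": "debug"},
+	}
+}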
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+type ConfigMapList struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of ConfigMaps.
+ Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Type and constants for component health validation.
+type ComponentConditionType string
+
+// These are the valid conditions for the component.
+const (
+ ComponentHealthy ComponentConditionType = "Healthy"
+)
+
+// Information about the condition of a component.
+type ComponentCondition struct {
+ // Type of condition for a component.
+ // Valid value: "Healthy"
+ Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
+ // Status of the condition for a component.
+ // Valid values for "Healthy": "True", "False", or "Unknown".
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Message about the condition for a component.
+ // For example, information about a health check.
+ Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+ // Condition error code for a component.
+ // For example, a health check error code.
+ Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+type ComponentStatus struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of component conditions observed
+ Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+type ComponentStatusList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of ComponentStatus objects.
+ Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
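+
+// Editor's note: the function below is an illustrative sketch added during
+// review and is not part of the vendored upstream file. It shows how the
+// ComponentHealthy condition is meant to be read from a ComponentStatus;
+// ConditionTrue is the "True" ConditionStatus constant defined elsewhere in
+// this package.
+func exampleIsHealthy(cs ComponentStatus) bool {
+	for _, cond := range cs.Conditions {
+		if cond.Type == ComponentHealthy {
+			return cond.Status == ConditionTrue
+		}
+	}
+	return false
+}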
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+ // Items is a list of downward API volume file
+ Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
+}
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+type DownwardAPIVolumeFile struct {
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+ // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
+ FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
+}
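+
+// Editor's note: the function below is an illustrative sketch added during
+// review and is not part of the vendored upstream file. It shows a downward
+// API volume that exposes the pod's labels as a file named "labels";
+// ObjectFieldSelector is defined elsewhere in this package and
+// "metadata.labels" is one of the supported pod fields.
+func exampleDownwardAPIVolume() DownwardAPIVolumeSource {
+	return DownwardAPIVolumeSource{
+		Items: []DownwardAPIVolumeFile{
+			{
+				Path:     "labels",
+				FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"},
+			},
+		},
+	}
+}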
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext. When both
+// are set, the values in SecurityContext take precedence.
+type SecurityContext struct {
+ // The capabilities to add/drop when running containers.
+ // Defaults to the default set of capabilities granted by the container runtime.
+ Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
+ // Run container in privileged mode.
+ // Processes in privileged containers are essentially equivalent to root on the host.
+ // Defaults to false.
+ Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
+ // The SELinux context to be applied to the container.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
+ // Whether this container has a read-only root filesystem.
+ // Default is false.
+ ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
+}
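+
+// Editor's note: the function below is an illustrative sketch added during
+// review and is not part of the vendored upstream file. It shows how the
+// pointer-typed fields above are typically populated; the local helper
+// closures exist only for this example.
+func exampleSecurityContext() *SecurityContext {
+	boolPtr := func(b bool) *bool { return &b }
+	int64Ptr := func(i int64) *int64 { return &i }
+	return &SecurityContext{
+		Privileged:             boolPtr(false),
+		RunAsUser:              int64Ptr(1000),
+		RunAsNonRoot:           boolPtr(true),
+		ReadOnlyRootFilesystem: boolPtr(true),
+	}
+}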
+
+// SELinuxOptions are the labels to be applied to the container
+type SELinuxOptions struct {
+ // User is a SELinux user label that applies to the container.
+ User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
+ // Role is a SELinux role label that applies to the container.
+ Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
+ // Type is a SELinux type label that applies to the container.
+ Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
+ // Level is a SELinux level label that applies to the container.
+ Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
+}
+
+// RangeAllocation is not a public type.
+type RangeAllocation struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Range is string that identifies the range represented by 'data'.
+ Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
+ // Data is a bit array containing all allocated addresses in the previous segment.
+ Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
+}
+
+const (
+ // "default-scheduler" is the name of default scheduler.
+ DefaultSchedulerName = "default-scheduler"
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..c5eb00d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go
@@ -0,0 +1,1742 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AWSElasticBlockStoreVolumeSource = map[string]string{
+ "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.",
+ "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore",
+ "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore",
+ "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).",
+ "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore",
+}
+
+func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string {
+ return map_AWSElasticBlockStoreVolumeSource
+}
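+
+// Editor's note: the function below is an illustrative sketch added during
+// review and is not part of the generated upstream file. It shows the
+// convention the generated maps follow: the empty key carries the type's
+// description and every other key carries per-field documentation, which is
+// what go-restful consumes via the SwaggerDoc methods.
+func exampleFieldDoc() string {
+	docs := AWSElasticBlockStoreVolumeSource{}.SwaggerDoc()
+	return docs["volumeID"] // documentation string for the volumeID field
+}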
+
+var map_Affinity = map[string]string{
+ "": "Affinity is a group of affinity scheduling rules.",
+ "nodeAffinity": "Describes node affinity scheduling rules for the pod.",
+ "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
+ "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
+}
+
+func (Affinity) SwaggerDoc() map[string]string {
+ return map_Affinity
+}
+
+var map_AttachedVolume = map[string]string{
+ "": "AttachedVolume describes a volume attached to a node",
+ "name": "Name of the attached volume",
+ "devicePath": "DevicePath represents the device path where the volume should be avilable",
+}
+
+func (AttachedVolume) SwaggerDoc() map[string]string {
+ return map_AttachedVolume
+}
+
+var map_AzureFileVolumeSource = map[string]string{
+ "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "secretName": "the name of secret that contains Azure Storage Account Name and Key",
+ "shareName": "Share Name",
+ "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (AzureFileVolumeSource) SwaggerDoc() map[string]string {
+ return map_AzureFileVolumeSource
+}
+
+var map_Binding = map[string]string{
+ "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "target": "The target object that you want to bind to the standard object.",
+}
+
+func (Binding) SwaggerDoc() map[string]string {
+ return map_Binding
+}
+
+var map_Capabilities = map[string]string{
+ "": "Adds and removes POSIX capabilities from running containers.",
+ "add": "Added capabilities",
+ "drop": "Removed capabilities",
+}
+
+func (Capabilities) SwaggerDoc() map[string]string {
+ return map_Capabilities
+}
+
+var map_CephFSVolumeSource = map[string]string{
+ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
+ "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+ "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+}
+
+func (CephFSVolumeSource) SwaggerDoc() map[string]string {
+ return map_CephFSVolumeSource
+}
+
+var map_CinderVolumeSource = map[string]string{
+ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+ "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+}
+
+func (CinderVolumeSource) SwaggerDoc() map[string]string {
+ return map_CinderVolumeSource
+}
+
+var map_ComponentCondition = map[string]string{
+ "": "Information about the condition of a component.",
+ "type": "Type of condition for a component. Valid value: \"Healthy\"",
+ "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".",
+ "message": "Message about the condition for a component. For example, information about a health check.",
+ "error": "Condition error code for a component. For example, a health check error code.",
+}
+
+func (ComponentCondition) SwaggerDoc() map[string]string {
+ return map_ComponentCondition
+}
+
+var map_ComponentStatus = map[string]string{
+ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "conditions": "List of component conditions observed",
+}
+
+func (ComponentStatus) SwaggerDoc() map[string]string {
+ return map_ComponentStatus
+}
+
+var map_ComponentStatusList = map[string]string{
+ "": "Status of all the conditions for the component as a list of ComponentStatus objects.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of ComponentStatus objects.",
+}
+
+func (ComponentStatusList) SwaggerDoc() map[string]string {
+ return map_ComponentStatusList
+}
+
+var map_ConfigMap = map[string]string{
+ "": "ConfigMap holds configuration data for pods to consume.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "data": "Data contains the configuration data. Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.",
+}
+
+func (ConfigMap) SwaggerDoc() map[string]string {
+ return map_ConfigMap
+}
+
+var map_ConfigMapKeySelector = map[string]string{
+ "": "Selects a key from a ConfigMap.",
+ "key": "The key to select.",
+}
+
+func (ConfigMapKeySelector) SwaggerDoc() map[string]string {
+ return map_ConfigMapKeySelector
+}
+
+var map_ConfigMapList = map[string]string{
+ "": "ConfigMapList is a resource containing a list of ConfigMap objects.",
+ "metadata": "More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of ConfigMaps.",
+}
+
+func (ConfigMapList) SwaggerDoc() map[string]string {
+ return map_ConfigMapList
+}
+
+var map_ConfigMapVolumeSource = map[string]string{
+ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
+ "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.",
+}
+
+func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
+ return map_ConfigMapVolumeSource
+}
+
+var map_Container = map[string]string{
+ "": "A single application container that you want to run within a pod.",
+ "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
+ "image": "Docker image name. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md",
+ "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands",
+ "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands",
+ "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+ "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
+ "env": "List of environment variables to set in the container. Cannot be updated.",
+ "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources",
+ "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+ "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
+ "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
+ "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.",
+ "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images",
+ "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md",
+ "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+ "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
+ "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+}
+
+func (Container) SwaggerDoc() map[string]string {
+ return map_Container
+}
+
+var map_ContainerImage = map[string]string{
+ "": "Describe a container image",
+ "names": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]",
+ "sizeBytes": "The size of the image in bytes.",
+}
+
+func (ContainerImage) SwaggerDoc() map[string]string {
+ return map_ContainerImage
+}
+
+var map_ContainerPort = map[string]string{
+ "": "ContainerPort represents a network port in a single container.",
+ "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
+ "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
+ "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.",
+ "protocol": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".",
+ "hostIP": "What host IP to bind the external port to.",
+}
+
+func (ContainerPort) SwaggerDoc() map[string]string {
+ return map_ContainerPort
+}
+
+var map_ContainerState = map[string]string{
+ "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
+ "waiting": "Details about a waiting container",
+ "running": "Details about a running container",
+ "terminated": "Details about a terminated container",
+}
+
+func (ContainerState) SwaggerDoc() map[string]string {
+ return map_ContainerState
+}
+
+var map_ContainerStateRunning = map[string]string{
+ "": "ContainerStateRunning is a running state of a container.",
+ "startedAt": "Time at which the container was last (re-)started",
+}
+
+func (ContainerStateRunning) SwaggerDoc() map[string]string {
+ return map_ContainerStateRunning
+}
+
+var map_ContainerStateTerminated = map[string]string{
+ "": "ContainerStateTerminated is a terminated state of a container.",
+ "exitCode": "Exit status from the last termination of the container",
+ "signal": "Signal from the last termination of the container",
+ "reason": "(brief) reason from the last termination of the container",
+ "message": "Message regarding the last termination of the container",
+ "startedAt": "Time at which previous execution of the container started",
+ "finishedAt": "Time at which the container last terminated",
+ "containerID": "Container's ID in the format 'docker://<container_id>'",
+}
+
+func (ContainerStateTerminated) SwaggerDoc() map[string]string {
+ return map_ContainerStateTerminated
+}
+
+var map_ContainerStateWaiting = map[string]string{
+ "": "ContainerStateWaiting is a waiting state of a container.",
+ "reason": "(brief) reason the container is not yet running.",
+ "message": "Message regarding why the container is not yet running.",
+}
+
+func (ContainerStateWaiting) SwaggerDoc() map[string]string {
+ return map_ContainerStateWaiting
+}
+
+var map_ContainerStatus = map[string]string{
+ "": "ContainerStatus contains details for the current status of this container.",
+ "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.",
+ "state": "Details about the container's current condition.",
+ "lastState": "Details about the container's last termination condition.",
+ "ready": "Specifies whether the container has passed its readiness probe.",
+ "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.",
+ "image": "The image the container is running. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md",
+ "imageID": "ImageID of the container's image.",
+ "containerID": "Container's ID in the format 'docker://<container_id>'. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information",
+}
+
+func (ContainerStatus) SwaggerDoc() map[string]string {
+ return map_ContainerStatus
+}
+
+var map_DaemonEndpoint = map[string]string{
+ "": "DaemonEndpoint contains information about a single Daemon endpoint.",
+ "Port": "Port number of the given endpoint.",
+}
+
+func (DaemonEndpoint) SwaggerDoc() map[string]string {
+ return map_DaemonEndpoint
+}
+
+var map_DeleteOptions = map[string]string{
+ "": "DeleteOptions may be provided when deleting an API object",
+ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
+ "orphanDependents": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list.",
+}
+
+func (DeleteOptions) SwaggerDoc() map[string]string {
+ return map_DeleteOptions
+}
+
+var map_DownwardAPIVolumeFile = map[string]string{
+ "": "DownwardAPIVolumeFile represents information to create the file containing the pod field",
+ "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
+ "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.",
+ "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
+}
+
+func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string {
+ return map_DownwardAPIVolumeFile
+}
+
+var map_DownwardAPIVolumeSource = map[string]string{
+ "": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
+ "items": "Items is a list of downward API volume file",
+}
+
+func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
+ return map_DownwardAPIVolumeSource
+}
+
+var map_EmptyDirVolumeSource = map[string]string{
+ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
+ "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir",
+}
+
+func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
+ return map_EmptyDirVolumeSource
+}
+
+var map_EndpointAddress = map[string]string{
+ "": "EndpointAddress is a tuple that describes single IP address.",
+ "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.",
+ "hostname": "The Hostname of this endpoint",
+ "targetRef": "Reference to object providing the endpoint.",
+}
+
+func (EndpointAddress) SwaggerDoc() map[string]string {
+ return map_EndpointAddress
+}
+
+var map_EndpointPort = map[string]string{
+ "": "EndpointPort is a tuple that describes a single port.",
+ "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.",
+ "port": "The port number of the endpoint.",
+ "protocol": "The IP protocol for this port. Must be UDP or TCP. Default is TCP.",
+}
+
+func (EndpointPort) SwaggerDoc() map[string]string {
+ return map_EndpointPort
+}
+
+var map_EndpointSubset = map[string]string{
+ "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]",
+ "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
+ "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
+ "ports": "Port numbers available on the related IP addresses.",
+}
+
+func (EndpointSubset) SwaggerDoc() map[string]string {
+ return map_EndpointSubset
+}
+
+var map_Endpoints = map[string]string{
+ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
+}
+
+func (Endpoints) SwaggerDoc() map[string]string {
+ return map_Endpoints
+}
+
+var map_EndpointsList = map[string]string{
+ "": "EndpointsList is a list of endpoints.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of endpoints.",
+}
+
+func (EndpointsList) SwaggerDoc() map[string]string {
+ return map_EndpointsList
+}
+
+var map_EnvVar = map[string]string{
+ "": "EnvVar represents an environment variable present in a Container.",
+ "name": "Name of the environment variable. Must be a C_IDENTIFIER.",
+ "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
+ "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
+}
+
+func (EnvVar) SwaggerDoc() map[string]string {
+ return map_EnvVar
+}
+
+var map_EnvVarSource = map[string]string{
+ "": "EnvVarSource represents a source for the value of an EnvVar.",
+ "fieldRef": "Selects a field of the pod; only name and namespace are supported.",
+ "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
+ "configMapKeyRef": "Selects a key of a ConfigMap.",
+ "secretKeyRef": "Selects a key of a secret in the pod's namespace",
+}
+
+func (EnvVarSource) SwaggerDoc() map[string]string {
+ return map_EnvVarSource
+}
+
+var map_Event = map[string]string{
+ "": "Event is a report of an event somewhere in the cluster.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "involvedObject": "The object that this event is about.",
+ "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
+ "message": "A human-readable description of the status of this operation.",
+ "source": "The component reporting this event. Should be a short machine understandable string.",
+ "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
+ "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.",
+ "count": "The number of times this event has occurred.",
+ "type": "Type of this event (Normal, Warning), new types could be added in the future",
+}
+
+func (Event) SwaggerDoc() map[string]string {
+ return map_Event
+}
+
+var map_EventList = map[string]string{
+ "": "EventList is a list of events.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of events",
+}
+
+func (EventList) SwaggerDoc() map[string]string {
+ return map_EventList
+}
+
+var map_EventSource = map[string]string{
+ "": "EventSource contains information for an event.",
+ "component": "Component from which the event is generated.",
+ "host": "Host name on which the event is generated.",
+}
+
+func (EventSource) SwaggerDoc() map[string]string {
+ return map_EventSource
+}
+
+var map_ExecAction = map[string]string{
+ "": "ExecAction describes a \"run in container\" action.",
+ "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.",
+}
+
+func (ExecAction) SwaggerDoc() map[string]string {
+ return map_ExecAction
+}
+
+var map_ExportOptions = map[string]string{
+ "": "ExportOptions is the query options to the standard REST get call.",
+ "export": "Should this value be exported. Export strips fields that a user can not specify.",
+ "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'",
+}
+
+func (ExportOptions) SwaggerDoc() map[string]string {
+ return map_ExportOptions
+}
+
+var map_FCVolumeSource = map[string]string{
+ "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.",
+ "targetWWNs": "Required: FC target world wide names (WWNs)",
+ "lun": "Required: FC target lun number",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (FCVolumeSource) SwaggerDoc() map[string]string {
+ return map_FCVolumeSource
+}
+
+var map_FlexVolumeSource = map[string]string{
+ "": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.",
+ "driver": "Driver is the name of the driver to use for this volume.",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
+ "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "options": "Optional: Extra command options if any.",
+}
+
+func (FlexVolumeSource) SwaggerDoc() map[string]string {
+ return map_FlexVolumeSource
+}
+
+var map_FlockerVolumeSource = map[string]string{
+ "": "Represents a Flocker volume mounted by the Flocker agent. Flocker volumes do not support ownership management or SELinux relabeling.",
+ "datasetName": "Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker",
+}
+
+func (FlockerVolumeSource) SwaggerDoc() map[string]string {
+ return map_FlockerVolumeSource
+}
+
+var map_GCEPersistentDiskVolumeSource = map[string]string{
+ "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.",
+ "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
+ "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
+ "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
+ "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
+}
+
+func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
+ return map_GCEPersistentDiskVolumeSource
+}
+
+var map_GitRepoVolumeSource = map[string]string{
+ "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.",
+ "repository": "Repository URL",
+ "revision": "Commit hash for the specified revision.",
+ "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
+}
+
+func (GitRepoVolumeSource) SwaggerDoc() map[string]string {
+ return map_GitRepoVolumeSource
+}
+
+var map_GlusterfsVolumeSource = map[string]string{
+ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+ "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+ "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+ "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
+}
+
+func (GlusterfsVolumeSource) SwaggerDoc() map[string]string {
+ return map_GlusterfsVolumeSource
+}
+
+var map_HTTPGetAction = map[string]string{
+ "": "HTTPGetAction describes an action based on HTTP Get requests.",
+ "path": "Path to access on the HTTP server.",
+ "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+ "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.",
+ "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.",
+ "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.",
+}
+
+func (HTTPGetAction) SwaggerDoc() map[string]string {
+ return map_HTTPGetAction
+}
+
+var map_HTTPHeader = map[string]string{
+ "": "HTTPHeader describes a custom header to be used in HTTP probes",
+ "name": "The header field name",
+ "value": "The header field value",
+}
+
+func (HTTPHeader) SwaggerDoc() map[string]string {
+ return map_HTTPHeader
+}
+
+var map_Handler = map[string]string{
+ "": "Handler defines a specific action that should be taken",
+ "exec": "One and only one of the following should be specified. Exec specifies the action to take.",
+ "httpGet": "HTTPGet specifies the http request to perform.",
+ "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
+}
+
+func (Handler) SwaggerDoc() map[string]string {
+ return map_Handler
+}
+
+var map_HostPathVolumeSource = map[string]string{
+ "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
+ "path": "Path of the directory on the host. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath",
+}
+
+func (HostPathVolumeSource) SwaggerDoc() map[string]string {
+ return map_HostPathVolumeSource
+}
+
+var map_ISCSIVolumeSource = map[string]string{
+ "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
+ "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+ "iqn": "Target iSCSI Qualified Name.",
+ "lun": "iSCSI target lun number.",
+ "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.",
+ "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi",
+ "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
+}
+
+func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
+ return map_ISCSIVolumeSource
+}
+
+var map_KeyToPath = map[string]string{
+ "": "Maps a string key to a path within a volume.",
+ "key": "The key to project.",
+ "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
+}
+
+func (KeyToPath) SwaggerDoc() map[string]string {
+ return map_KeyToPath
+}
+
+var map_Lifecycle = map[string]string{
+ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
+ "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details",
+ "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details",
+}
+
+func (Lifecycle) SwaggerDoc() map[string]string {
+ return map_Lifecycle
+}
+
+var map_LimitRange = map[string]string{
+ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (LimitRange) SwaggerDoc() map[string]string {
+ return map_LimitRange
+}
+
+var map_LimitRangeItem = map[string]string{
+ "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.",
+ "type": "Type of resource that this limit applies to.",
+ "max": "Max usage constraints on this kind by resource name.",
+ "min": "Min usage constraints on this kind by resource name.",
+ "default": "Default resource requirement limit value by resource name if resource limit is omitted.",
+ "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.",
+ "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.",
+}
+
+func (LimitRangeItem) SwaggerDoc() map[string]string {
+ return map_LimitRangeItem
+}
+
+var map_LimitRangeList = map[string]string{
+ "": "LimitRangeList is a list of LimitRange items.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md",
+}
+
+func (LimitRangeList) SwaggerDoc() map[string]string {
+ return map_LimitRangeList
+}
+
+var map_LimitRangeSpec = map[string]string{
+ "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.",
+ "limits": "Limits is the list of LimitRangeItem objects that are enforced.",
+}
+
+func (LimitRangeSpec) SwaggerDoc() map[string]string {
+ return map_LimitRangeSpec
+}
+
+var map_List = map[string]string{
+ "": "List holds a list of objects, which may not be known by the server.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of objects",
+}
+
+func (List) SwaggerDoc() map[string]string {
+ return map_List
+}
+
+var map_ListOptions = map[string]string{
+ "": "ListOptions is the query options to a standard REST list call.",
+ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.",
+ "timeoutSeconds": "Timeout for the list/watch call.",
+}
+
+func (ListOptions) SwaggerDoc() map[string]string {
+ return map_ListOptions
+}
+
+var map_LoadBalancerIngress = map[string]string{
+ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
+ "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
+ "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
+}
+
+func (LoadBalancerIngress) SwaggerDoc() map[string]string {
+ return map_LoadBalancerIngress
+}
+
+var map_LoadBalancerStatus = map[string]string{
+ "": "LoadBalancerStatus represents the status of a load-balancer.",
+ "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.",
+}
+
+func (LoadBalancerStatus) SwaggerDoc() map[string]string {
+ return map_LoadBalancerStatus
+}
+
+var map_LocalObjectReference = map[string]string{
+ "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
+ "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+}
+
+func (LocalObjectReference) SwaggerDoc() map[string]string {
+ return map_LocalObjectReference
+}
+
+var map_NFSVolumeSource = map[string]string{
+ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
+ "server": "Server is the hostname or IP address of the NFS server. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
+ "path": "Path that is exported by the NFS server. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
+ "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
+}
+
+func (NFSVolumeSource) SwaggerDoc() map[string]string {
+ return map_NFSVolumeSource
+}
+
+var map_Namespace = map[string]string{
+ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Namespace) SwaggerDoc() map[string]string {
+ return map_Namespace
+}
+
+var map_NamespaceList = map[string]string{
+ "": "NamespaceList is a list of Namespaces.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "Items is the list of Namespace objects in the list. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md",
+}
+
+func (NamespaceList) SwaggerDoc() map[string]string {
+ return map_NamespaceList
+}
+
+var map_NamespaceSpec = map[string]string{
+ "": "NamespaceSpec describes the attributes on a Namespace.",
+ "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers",
+}
+
+func (NamespaceSpec) SwaggerDoc() map[string]string {
+ return map_NamespaceSpec
+}
+
+var map_NamespaceStatus = map[string]string{
+ "": "NamespaceStatus is information about the current status of a Namespace.",
+ "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases",
+}
+
+func (NamespaceStatus) SwaggerDoc() map[string]string {
+ return map_NamespaceStatus
+}
+
+var map_Node = map[string]string{
+ "": "Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Node) SwaggerDoc() map[string]string {
+ return map_Node
+}
+
+var map_NodeAddress = map[string]string{
+ "": "NodeAddress contains information for the node's address.",
+ "type": "Node address type, one of Hostname, ExternalIP or InternalIP.",
+ "address": "The node address.",
+}
+
+func (NodeAddress) SwaggerDoc() map[string]string {
+ return map_NodeAddress
+}
+
+var map_NodeAffinity = map[string]string{
+ "": "Node affinity is a group of node affinity scheduling rules.",
+ "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
+ "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
+}
+
+func (NodeAffinity) SwaggerDoc() map[string]string {
+ return map_NodeAffinity
+}
+
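+// Editor's note: illustrative sketch only of the "greatest sum of weights" rule for
+// preferredDuringSchedulingIgnoredDuringExecution described above, written against
+// hypothetical stand-in types (not this package's API).
+//
+//    type weightedTerm struct {
+//        weight  int32
+//        matches func(nodeLabels map[string]string) bool
+//    }
+//
+//    // preferenceScore adds weight for every term the node satisfies; the scheduler
+//    // prefers the node(s) with the highest resulting sum.
+//    func preferenceScore(terms []weightedTerm, nodeLabels map[string]string) int32 {
+//        var sum int32
+//        for _, t := range terms {
+//            if t.matches(nodeLabels) {
+//                sum += t.weight
+//            }
+//        }
+//        return sum
+//    }
+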
+var map_NodeCondition = map[string]string{
+ "": "NodeCondition contains condition infromation for a node.",
+ "type": "Type of node condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastHeartbeatTime": "Last time we got an update on a given condition.",
+ "lastTransitionTime": "Last time the condition transit from one status to another.",
+ "reason": "(brief) reason for the condition's last transition.",
+ "message": "Human readable message indicating details about last transition.",
+}
+
+func (NodeCondition) SwaggerDoc() map[string]string {
+ return map_NodeCondition
+}
+
+var map_NodeDaemonEndpoints = map[string]string{
+ "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
+ "kubeletEndpoint": "Endpoint on which Kubelet is listening.",
+}
+
+func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
+ return map_NodeDaemonEndpoints
+}
+
+var map_NodeList = map[string]string{
+ "": "NodeList is the whole list of all Nodes which have been registered with master.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of nodes",
+}
+
+func (NodeList) SwaggerDoc() map[string]string {
+ return map_NodeList
+}
+
+var map_NodeProxyOptions = map[string]string{
+ "": "NodeProxyOptions is the query options to a Node's proxy call.",
+ "path": "Path is the URL path to use for the current proxy request to node.",
+}
+
+func (NodeProxyOptions) SwaggerDoc() map[string]string {
+ return map_NodeProxyOptions
+}
+
+var map_NodeSelector = map[string]string{
+ "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
+ "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.",
+}
+
+func (NodeSelector) SwaggerDoc() map[string]string {
+ return map_NodeSelector
+}
+
+var map_NodeSelectorRequirement = map[string]string{
+ "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "The label key that the selector applies to.",
+ "operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.",
+ "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
+}
+
+func (NodeSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_NodeSelectorRequirement
+}
+
+var map_NodeSelectorTerm = map[string]string{
+ "": "A null or empty node selector term matches no objects.",
+ "matchExpressions": "Required. A list of node selector requirements. The requirements are ANDed.",
+}
+
+func (NodeSelectorTerm) SwaggerDoc() map[string]string {
+ return map_NodeSelectorTerm
+}
+
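+// Editor's note: illustrative sketch only. It mirrors the semantics documented above:
+// node selector terms are ORed, while the requirements inside a single term are ANDed.
+// The types and helper names are hypothetical, not this package's API.
+//
+//    // requirement reports whether one key/operator/values constraint holds for a label set.
+//    type requirement func(labels map[string]string) bool
+//
+//    func selectorMatches(terms [][]requirement, labels map[string]string) bool {
+//        for _, term := range terms { // terms are ORed
+//            satisfied := true
+//            for _, req := range term { // requirements within a term are ANDed
+//                if !req(labels) {
+//                    satisfied = false
+//                    break
+//                }
+//            }
+//            if satisfied {
+//                return true
+//            }
+//        }
+//        return false // a null or empty term list matches no objects
+//    }
+//
+//    // Example requirement for operator In:
+//    //   func(labels map[string]string) bool { v, ok := labels["zone"]; return ok && v == "us-east-1a" }
+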
+var map_NodeSpec = map[string]string{
+ "": "NodeSpec describes the attributes that a node is created with.",
+ "podCIDR": "PodCIDR represents the pod IP range assigned to the node.",
+ "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.",
+ "providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
+ "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"`",
+}
+
+func (NodeSpec) SwaggerDoc() map[string]string {
+ return map_NodeSpec
+}
+
+var map_NodeStatus = map[string]string{
+ "": "NodeStatus is information about the current status of a node.",
+ "capacity": "Capacity represents the total resources of a node. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details.",
+ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
+ "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase",
+ "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition",
+ "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses",
+ "daemonEndpoints": "Endpoints of daemons running on the Node.",
+ "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info",
+ "images": "List of container images on this node",
+ "volumesInUse": "List of attachable volumes in use (mounted) by the node.",
+ "volumesAttached": "List of volumes that are attached to the node.",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+ return map_NodeStatus
+}
+
+var map_NodeSystemInfo = map[string]string{
+ "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
+ "machineID": "Machine ID reported by the node.",
+ "systemUUID": "System UUID reported by the node.",
+ "bootID": "Boot ID reported by the node.",
+ "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).",
+ "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
+ "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).",
+ "kubeletVersion": "Kubelet Version reported by the node.",
+ "kubeProxyVersion": "KubeProxy Version reported by the node.",
+ "operatingSystem": "The Operating System reported by the node",
+ "architecture": "The Architecture reported by the node",
+}
+
+func (NodeSystemInfo) SwaggerDoc() map[string]string {
+ return map_NodeSystemInfo
+}
+
+var map_ObjectFieldSelector = map[string]string{
+ "": "ObjectFieldSelector selects an APIVersioned field of an object.",
+ "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".",
+ "fieldPath": "Path of the field to select in the specified API version.",
+}
+
+func (ObjectFieldSelector) SwaggerDoc() map[string]string {
+ return map_ObjectFieldSelector
+}
+
+var map_ObjectMeta = map[string]string{
+ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+ "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency",
+ "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md",
+ "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.",
+ "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids",
+ "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency",
+ "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
+ "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet will send a hard termination signal to the container. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
+ "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md",
+ "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md",
+ "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+ "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
+}
+
+func (ObjectMeta) SwaggerDoc() map[string]string {
+ return map_ObjectMeta
+}
+
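+// Editor's note: illustrative sketch only of the generateName behaviour documented
+// above — the server appends a unique suffix and truncates the prefix to keep the
+// result a valid name. The constants and helper name are assumptions for this sketch,
+// not the real apiserver implementation.
+//
+//    const maxNameLen = 63 // assumed limit for this sketch
+//    const suffixLen = 5
+//
+//    func generatedName(generateName, randomSuffix string) string {
+//        if len(generateName) > maxNameLen-suffixLen {
+//            generateName = generateName[:maxNameLen-suffixLen] // leave room for the suffix
+//        }
+//        return generateName + randomSuffix // e.g. "web-" -> "web-x7k2q"
+//    }
+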
+var map_ObjectReference = map[string]string{
+ "": "ObjectReference contains enough information to let you inspect or modify the referred object.",
+ "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "namespace": "Namespace of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md",
+ "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+ "uid": "UID of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids",
+ "apiVersion": "API version of the referent.",
+ "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency",
+ "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+ return map_ObjectReference
+}
+
+var map_OwnerReference = map[string]string{
+ "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.",
+ "apiVersion": "API version of the referent.",
+ "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+ "uid": "UID of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids",
+ "controller": "If true, this reference points to the managing controller.",
+}
+
+func (OwnerReference) SwaggerDoc() map[string]string {
+ return map_OwnerReference
+}
+
+var map_PersistentVolume = map[string]string{
+ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes",
+ "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes",
+}
+
+func (PersistentVolume) SwaggerDoc() map[string]string {
+ return map_PersistentVolume
+}
+
+var map_PersistentVolumeClaim = map[string]string{
+ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
+ "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
+}
+
+func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeClaim
+}
+
+var map_PersistentVolumeClaimList = map[string]string{
+ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "A list of persistent volume claims. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
+}
+
+func (PersistentVolumeClaimList) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeClaimList
+}
+
+var map_PersistentVolumeClaimSpec = map[string]string{
+ "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
+ "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1",
+ "selector": "A label query over volumes to consider for binding.",
+ "resources": "Resources represents the minimum resources the volume should have. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources",
+ "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
+}
+
+func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeClaimSpec
+}
+
+var map_PersistentVolumeClaimStatus = map[string]string{
+ "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
+ "phase": "Phase represents the current phase of PersistentVolumeClaim.",
+ "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1",
+ "capacity": "Represents the actual resources of the underlying volume.",
+}
+
+func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeClaimStatus
+}
+
+var map_PersistentVolumeClaimVolumeSource = map[string]string{
+ "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
+ "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
+ "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.",
+}
+
+func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeClaimVolumeSource
+}
+
+var map_PersistentVolumeList = map[string]string{
+ "": "PersistentVolumeList is a list of PersistentVolume items.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of persistent volumes. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md",
+}
+
+func (PersistentVolumeList) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeList
+}
+
+var map_PersistentVolumeSource = map[string]string{
+ "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
+ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
+ "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore",
+ "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath",
+ "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
+ "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
+ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+ "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
+ "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.",
+ "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+}
+
+func (PersistentVolumeSource) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeSource
+}
+
+var map_PersistentVolumeSpec = map[string]string{
+ "": "PersistentVolumeSpec is the specification of a persistent volume.",
+ "capacity": "A description of the persistent volume's resources and capacity. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity",
+ "accessModes": "AccessModes contains all ways the volume can be mounted. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes",
+ "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding",
+ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recyling must be supported by the volume plugin underlying this persistent volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy",
+}
+
+func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeSpec
+}
+
+var map_PersistentVolumeStatus = map[string]string{
+ "": "PersistentVolumeStatus is the current status of a persistent volume.",
+ "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase",
+ "message": "A human-readable message indicating details about why the volume is in this state.",
+ "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
+}
+
+func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
+ return map_PersistentVolumeStatus
+}
+
+var map_Pod = map[string]string{
+ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Pod) SwaggerDoc() map[string]string {
+ return map_Pod
+}
+
+var map_PodAffinity = map[string]string{
+ "": "Pod affinity is a group of inter pod affinity scheduling rules.",
+ "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+ "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+}
+
+func (PodAffinity) SwaggerDoc() map[string]string {
+ return map_PodAffinity
+}
+
+var map_PodAffinityTerm = map[string]string{
+ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> tches that of any node on which a pod of the set of pods is running",
+ "labelSelector": "A label query over a set of resources, in this case pods.",
+ "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\" The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.",
+ "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.",
+}
+
+func (PodAffinityTerm) SwaggerDoc() map[string]string {
+ return map_PodAffinityTerm
+}
+
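+// Editor's note: illustrative sketch only of the co-location rule described above: a
+// candidate node is "co-located" with a selected pod when both nodes carry the same
+// value for the topologyKey label. Names are hypothetical.
+//
+//    func coLocated(candidateNodeLabels, selectedPodNodeLabels map[string]string, topologyKey string) bool {
+//        a, okA := candidateNodeLabels[topologyKey]
+//        b, okB := selectedPodNodeLabels[topologyKey]
+//        return okA && okB && a == b // e.g. both nodes in the same zone/topology domain
+//    }
+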
+var map_PodAntiAffinity = map[string]string{
+ "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
+ "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+ "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+}
+
+func (PodAntiAffinity) SwaggerDoc() map[string]string {
+ return map_PodAntiAffinity
+}
+
+var map_PodAttachOptions = map[string]string{
+ "": "PodAttachOptions is the query options to a Pod's remote attach call.",
+ "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.",
+ "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.",
+ "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.",
+ "tty": "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.",
+ "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.",
+}
+
+func (PodAttachOptions) SwaggerDoc() map[string]string {
+ return map_PodAttachOptions
+}
+
+var map_PodCondition = map[string]string{
+ "": "PodCondition contains details for the current condition of this pod.",
+ "type": "Type is the type of the condition. Currently only Ready. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions",
+ "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions",
+ "lastProbeTime": "Last time we probed the condition.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "Unique, one-word, CamelCase reason for the condition's last transition.",
+ "message": "Human-readable message indicating details about last transition.",
+}
+
+func (PodCondition) SwaggerDoc() map[string]string {
+ return map_PodCondition
+}
+
+var map_PodExecOptions = map[string]string{
+ "": "PodExecOptions is the query options to a Pod's remote exec call.",
+ "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.",
+ "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.",
+ "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.",
+ "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.",
+ "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.",
+ "command": "Command is the remote command to execute. argv array. Not executed within a shell.",
+}
+
+func (PodExecOptions) SwaggerDoc() map[string]string {
+ return map_PodExecOptions
+}
+
+var map_PodList = map[string]string{
+ "": "PodList is a list of Pods.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of pods. More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md",
+}
+
+func (PodList) SwaggerDoc() map[string]string {
+ return map_PodList
+}
+
+var map_PodLogOptions = map[string]string{
+ "": "PodLogOptions is the query options for a Pod's logs REST call.",
+ "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
+ "follow": "Follow the log stream of the pod. Defaults to false.",
+ "previous": "Return previous terminated container logs. Defaults to false.",
+ "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
+ "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
+ "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
+}
+
+func (PodLogOptions) SwaggerDoc() map[string]string {
+ return map_PodLogOptions
+}
+
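+// Editor's note: illustrative sketch only of the constraint documented above that
+// sinceSeconds and sinceTime are mutually exclusive. The validation helper is
+// hypothetical, not this package's API; assumes "errors" and "time" are imported.
+//
+//    func validateLogOptions(sinceSeconds *int64, sinceTime *time.Time) error {
+//        if sinceSeconds != nil && sinceTime != nil {
+//            return errors.New("only one of sinceSeconds or sinceTime may be specified")
+//        }
+//        return nil
+//    }
+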
+var map_PodProxyOptions = map[string]string{
+ "": "PodProxyOptions is the query options to a Pod's proxy call.",
+ "path": "Path is the URL path to use for the current proxy request to pod.",
+}
+
+func (PodProxyOptions) SwaggerDoc() map[string]string {
+ return map_PodProxyOptions
+}
+
+var map_PodSecurityContext = map[string]string{
+ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
+ "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
+ "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
+}
+
+func (PodSecurityContext) SwaggerDoc() map[string]string {
+ return map_PodSecurityContext
+}
+
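+// Editor's note: illustrative sketch only of the fsGroup behaviour described above
+// (the owning GID becomes the FSGroup, the setgid bit is set, and rw bits are OR'd
+// in). Hypothetical helper, not the Kubelet's implementation; assumes "os" is
+// imported and a Unix host.
+//
+//    func applyFSGroup(path string, fsGroup int) error {
+//        if err := os.Chown(path, -1, fsGroup); err != nil { // -1 leaves the owner UID unchanged
+//            return err
+//        }
+//        fi, err := os.Stat(path)
+//        if err != nil {
+//            return err
+//        }
+//        return os.Chmod(path, fi.Mode()|os.ModeSetgid|0660) // setgid + owner/group rw
+//    }
+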
+var map_PodSpec = map[string]string{
+ "": "PodSpec is a description of a pod.",
+ "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md",
+ "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md",
+ "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy",
+ "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
+ "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
+ "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\".",
+ "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md",
+ "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md",
+ "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
+ "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
+ "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+ "hostPID": "Use the host's pid namespace. Optional: Default to false.",
+ "hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
+ "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
+ "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod",
+ "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
+ "subdomain": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.",
+}
+
+func (PodSpec) SwaggerDoc() map[string]string {
+ return map_PodSpec
+}
+
+var map_PodStatus = map[string]string{
+ "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.",
+ "phase": "Current condition of the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase",
+ "conditions": "Current service state of pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions",
+ "message": "A human readable message indicating details about why the pod is in this condition.",
+ "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'",
+ "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
+ "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
+ "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
+ "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses",
+}
+
+func (PodStatus) SwaggerDoc() map[string]string {
+ return map_PodStatus
+}
+
+var map_PodStatusResult = map[string]string{
+ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (PodStatusResult) SwaggerDoc() map[string]string {
+ return map_PodStatusResult
+}
+
+var map_PodTemplate = map[string]string{
+ "": "PodTemplate describes a template for creating copies of a predefined pod.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (PodTemplate) SwaggerDoc() map[string]string {
+ return map_PodTemplate
+}
+
+var map_PodTemplateList = map[string]string{
+ "": "PodTemplateList is a list of PodTemplates.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of pod templates",
+}
+
+func (PodTemplateList) SwaggerDoc() map[string]string {
+ return map_PodTemplateList
+}
+
+var map_PodTemplateSpec = map[string]string{
+ "": "PodTemplateSpec describes the data a pod should have when created from a template",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (PodTemplateSpec) SwaggerDoc() map[string]string {
+ return map_PodTemplateSpec
+}
+
+var map_Preconditions = map[string]string{
+ "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+ "uid": "Specifies the target UID.",
+}
+
+func (Preconditions) SwaggerDoc() map[string]string {
+ return map_Preconditions
+}
+
+var map_PreferredSchedulingTerm = map[string]string{
+ "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).",
+ "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.",
+ "preference": "A node selector term, associated with the corresponding weight.",
+}
+
+func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
+ return map_PreferredSchedulingTerm
+}
+
+var map_Probe = map[string]string{
+ "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
+ "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
+ "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
+ "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
+ "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.",
+ "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
+}
+
+func (Probe) SwaggerDoc() map[string]string {
+ return map_Probe
+}
+
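+// Editor's note: illustrative sketch only of the threshold behaviour documented above:
+// a probe flips to success after successThreshold consecutive successes and to failure
+// after failureThreshold consecutive failures. Hypothetical tracker, not the Kubelet's
+// prober.
+//
+//    type probeTracker struct {
+//        successThreshold, failureThreshold int32
+//        successes, failures                int32
+//    }
+//
+//    // observe records one probe attempt and reports "success", "failure",
+//    // or "unknown" while neither threshold has been reached.
+//    func (t *probeTracker) observe(ok bool) string {
+//        if ok {
+//            t.successes, t.failures = t.successes+1, 0
+//            if t.successes >= t.successThreshold {
+//                return "success"
+//            }
+//        } else {
+//            t.failures, t.successes = t.failures+1, 0
+//            if t.failures >= t.failureThreshold {
+//                return "failure"
+//            }
+//        }
+//        return "unknown"
+//    }
+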
+var map_RBDVolumeSource = map[string]string{
+ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+ "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "image": "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd",
+ "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.",
+ "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+}
+
+func (RBDVolumeSource) SwaggerDoc() map[string]string {
+ return map_RBDVolumeSource
+}
+
+var map_RangeAllocation = map[string]string{
+ "": "RangeAllocation is not a public type.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "range": "Range is string that identifies the range represented by 'data'.",
+ "data": "Data is a bit array containing all allocated addresses in the previous segment.",
+}
+
+func (RangeAllocation) SwaggerDoc() map[string]string {
+ return map_RangeAllocation
+}
+
+var map_ReplicationController = map[string]string{
+ "": "ReplicationController represents the configuration of a replication controller.",
+ "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicationController) SwaggerDoc() map[string]string {
+ return map_ReplicationController
+}
+
+var map_ReplicationControllerList = map[string]string{
+ "": "ReplicationControllerList is a collection of replication controllers.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of replication controllers. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md",
+}
+
+func (ReplicationControllerList) SwaggerDoc() map[string]string {
+ return map_ReplicationControllerList
+}
+
+var map_ReplicationControllerSpec = map[string]string{
+ "": "ReplicationControllerSpec is the specification of a replication controller.",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
+ "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template",
+}
+
+func (ReplicationControllerSpec) SwaggerDoc() map[string]string {
+ return map_ReplicationControllerSpec
+}
+
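+// Editor's note: illustrative sketch only of the selector defaulting described above —
+// an empty selector falls back to the labels on the pod template. Hypothetical helper
+// over plain maps, not this package's defaulting code.
+//
+//    func effectiveSelector(selector, templateLabels map[string]string) map[string]string {
+//        if len(selector) > 0 {
+//            return selector
+//        }
+//        return templateLabels // defaulted to the labels present on the pod template
+//    }
+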
+var map_ReplicationControllerStatus = map[string]string{
+ "": "ReplicationControllerStatus represents the current status of a replication controller.",
+ "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
+ "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.",
+ "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.",
+}
+
+func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
+ return map_ReplicationControllerStatus
+}
+
+var map_ResourceFieldSelector = map[string]string{
+ "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
+ "containerName": "Container name: required for volumes, optional for env vars",
+ "resource": "Required: resource to select",
+ "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"",
+}
+
+func (ResourceFieldSelector) SwaggerDoc() map[string]string {
+ return map_ResourceFieldSelector
+}
+
+var map_ResourceQuota = map[string]string{
+ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (ResourceQuota) SwaggerDoc() map[string]string {
+ return map_ResourceQuota
+}
+
+var map_ResourceQuotaList = map[string]string{
+ "": "ResourceQuotaList is a list of ResourceQuota items.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
+}
+
+func (ResourceQuotaList) SwaggerDoc() map[string]string {
+ return map_ResourceQuotaList
+}
+
+var map_ResourceQuotaSpec = map[string]string{
+ "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
+ "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
+ "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+}
+
+func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
+ return map_ResourceQuotaSpec
+}
+
+var map_ResourceQuotaStatus = map[string]string{
+ "": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
+ "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
+ "used": "Used is the current observed total usage of the resource in the namespace.",
+}
+
+func (ResourceQuotaStatus) SwaggerDoc() map[string]string {
+ return map_ResourceQuotaStatus
+}
+
+var map_ResourceRequirements = map[string]string{
+ "": "ResourceRequirements describes the compute resource requirements.",
+ "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications",
+ "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications",
+}
+
+func (ResourceRequirements) SwaggerDoc() map[string]string {
+ return map_ResourceRequirements
+}
+
+var map_SELinuxOptions = map[string]string{
+ "": "SELinuxOptions are the labels to be applied to the container",
+ "user": "User is a SELinux user label that applies to the container.",
+ "role": "Role is a SELinux role label that applies to the container.",
+ "type": "Type is a SELinux type label that applies to the container.",
+ "level": "Level is SELinux level label that applies to the container.",
+}
+
+func (SELinuxOptions) SwaggerDoc() map[string]string {
+ return map_SELinuxOptions
+}
+
+var map_Secret = map[string]string{
+ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
+ "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
+ "type": "Used to facilitate programmatic handling of secret data.",
+}
+
+func (Secret) SwaggerDoc() map[string]string {
+ return map_Secret
+}
+
+var map_SecretKeySelector = map[string]string{
+ "": "SecretKeySelector selects a key of a Secret.",
+ "key": "The key of the secret to select from. Must be a valid secret key.",
+}
+
+func (SecretKeySelector) SwaggerDoc() map[string]string {
+ return map_SecretKeySelector
+}
+
+var map_SecretList = map[string]string{
+ "": "SecretList is a list of Secret.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "Items is a list of secret objects. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md",
+}
+
+func (SecretList) SwaggerDoc() map[string]string {
+ return map_SecretList
+}
+
+var map_SecretVolumeSource = map[string]string{
+ "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
+ "secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets",
+ "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.",
+}
+
+func (SecretVolumeSource) SwaggerDoc() map[string]string {
+ return map_SecretVolumeSource
+}
+
+var map_SecurityContext = map[string]string{
+ "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.",
+ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
+ "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
+ "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.",
+}
+
+func (SecurityContext) SwaggerDoc() map[string]string {
+ return map_SecurityContext
+}
+
+var map_SerializedReference = map[string]string{
+ "": "SerializedReference is a reference to serialized object.",
+ "reference": "The reference to an object in the system.",
+}
+
+func (SerializedReference) SwaggerDoc() map[string]string {
+ return map_SerializedReference
+}
+
+var map_Service = map[string]string{
+ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Service) SwaggerDoc() map[string]string {
+ return map_Service
+}
+
+var map_ServiceAccount = map[string]string{
+ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md",
+ "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret",
+}
+
+func (ServiceAccount) SwaggerDoc() map[string]string {
+ return map_ServiceAccount
+}
+
+var map_ServiceAccountList = map[string]string{
+ "": "ServiceAccountList is a list of ServiceAccount objects",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts",
+}
+
+func (ServiceAccountList) SwaggerDoc() map[string]string {
+ return map_ServiceAccountList
+}
+
+var map_ServiceList = map[string]string{
+ "": "ServiceList holds a list of services.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of services",
+}
+
+func (ServiceList) SwaggerDoc() map[string]string {
+ return map_ServiceList
+}
+
+var map_ServicePort = map[string]string{
+ "": "ServicePort contains information on service's port.",
+ "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.",
+ "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.",
+ "port": "The port that will be exposed by this service.",
+ "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service",
+ "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport",
+}
+
+func (ServicePort) SwaggerDoc() map[string]string {
+ return map_ServicePort
+}
+
+var map_ServiceProxyOptions = map[string]string{
+ "": "ServiceProxyOptions is the query options to a Service's proxy call.",
+ "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.",
+}
+
+func (ServiceProxyOptions) SwaggerDoc() map[string]string {
+ return map_ServiceProxyOptions
+}
+
+var map_ServiceSpec = map[string]string{
+ "": "ServiceSpec describes the attributes that a user creates on a service.",
+ "ports": "The list of ports that are exposed by this service. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies",
+ "selector": "This service will route traffic to pods having labels matching this selector. Label keys and values that must match in order to receive traffic for this service. If not specified, endpoints must be manually specified and the system will not automatically manage them. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview",
+ "clusterIP": "ClusterIP is usually assigned by the master and is the IP address of the service. If specified, it will be allocated to the service if it is unused or else creation of the service will fail. Valid values are None, empty string (\"\"), or a valid IP address. 'None' can be specified for a headless service when proxying is not required. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies",
+ "type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services",
+ "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.",
+ "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.",
+ "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies",
+ "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
+ "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md",
+}
+
+func (ServiceSpec) SwaggerDoc() map[string]string {
+ return map_ServiceSpec
+}
+
+var map_ServiceStatus = map[string]string{
+ "": "ServiceStatus represents the current status of a service.",
+ "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.",
+}
+
+func (ServiceStatus) SwaggerDoc() map[string]string {
+ return map_ServiceStatus
+}
+
+var map_TCPSocketAction = map[string]string{
+ "": "TCPSocketAction describes an action based on opening a socket",
+ "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+}
+
+func (TCPSocketAction) SwaggerDoc() map[string]string {
+ return map_TCPSocketAction
+}
+
+var map_Taint = map[string]string{
+ "": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.",
+ "key": "Required. The taint key to be applied to a node.",
+ "value": "Required. The taint value corresponding to the taint key.",
+ "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule and PreferNoSchedule.",
+}
+
+func (Taint) SwaggerDoc() map[string]string {
+ return map_Taint
+}
+
+var map_Toleration = map[string]string{
+ "": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.",
+ "key": "Required. Key is the taint key that the toleration applies to.",
+ "operator": "operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
+ "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
+ "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and PreferNoSchedule.",
+}
+
+func (Toleration) SwaggerDoc() map[string]string {
+ return map_Toleration
+}
+
+var map_Volume = map[string]string{
+ "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
+ "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+}
+
+func (Volume) SwaggerDoc() map[string]string {
+ return map_Volume
+}
+
+var map_VolumeMount = map[string]string{
+ "": "VolumeMount describes a mounting of a Volume within a container.",
+ "name": "This must match the Name of a Volume.",
+ "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.",
+ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.",
+ "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
+}
+
+func (VolumeMount) SwaggerDoc() map[string]string {
+ return map_VolumeMount
+}
+
+var map_VolumeSource = map[string]string{
+ "": "Represents the source of a volume to mount. Only one of its members may be specified.",
+ "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath",
+ "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir",
+ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
+ "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore",
+ "gitRepo": "GitRepo represents a git repository at a particular revision.",
+ "secret": "Secret represents a secret that should populate this volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets",
+ "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
+ "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md",
+ "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
+ "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
+ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+ "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
+ "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "configMap": "ConfigMap represents a configMap that should populate this volume",
+ "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+}
+
+func (VolumeSource) SwaggerDoc() map[string]string {
+ return map_VolumeSource
+}
+
+var map_VsphereVirtualDiskVolumeSource = map[string]string{
+ "": "Represents a vSphere volume resource.",
+ "volumePath": "Path that identifies vSphere volume vmdk",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+}
+
+func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string {
+ return map_VsphereVirtualDiskVolumeSource
+}
+
+var map_WeightedPodAffinityTerm = map[string]string{
+ "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
+ "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",
+ "podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.",
+}
+
+func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
+ return map_WeightedPodAffinityTerm
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
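The generated maps above all follow one convention: the empty key "" holds the description of the type itself, and every other key is the JSON field name. Below is a minimal sketch of how such a map can be consumed; the swaggerDocumenter interface and fieldDoc helper are illustrative names, not part of the vendored package.

    package main

    import "fmt"

    // swaggerDocumenter is satisfied by every generated type above, since each
    // one exposes a value-receiver SwaggerDoc() method.
    type swaggerDocumenter interface {
        SwaggerDoc() map[string]string
    }

    // fieldDoc returns the doc string for a JSON field name; passing "" returns
    // the description of the type itself.
    func fieldDoc(t swaggerDocumenter, field string) string {
        return t.SwaggerDoc()[field]
    }

    func main() {
        // With the vendored v1 package imported, a call would look like:
        //   fieldDoc(v1.ServiceSpec{}, "clusterIP")
        fmt.Println(`SwaggerDoc maps: "" -> type description, other keys -> field docs`)
    }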
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go
new file mode 100644
index 0000000..f17a15c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package validation has functions for validating the correctness of api
+// objects and explaining what is wrong with them when they aren't valid.
+package validation
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/events.go
new file mode 100644
index 0000000..754cf88
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/events.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/util/validation"
+ "k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// ValidateEvent makes sure that the event makes sense.
+func ValidateEvent(event *api.Event) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // There is no namespace required for node or persistent volume.
+ // However, older client code accidentally sets event.Namespace
+ // to api.NamespaceDefault, so we accept that too, but "" is preferred.
+ if (event.InvolvedObject.Kind == "Node" || event.InvolvedObject.Kind == "PersistentVolume") &&
+ event.Namespace != api.NamespaceDefault &&
+ event.Namespace != "" {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "not allowed for node"))
+ }
+ if event.InvolvedObject.Kind != "Node" &&
+ event.InvolvedObject.Kind != "PersistentVolume" &&
+ event.Namespace != event.InvolvedObject.Namespace {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match involvedObject"))
+ }
+ for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
+ }
+ return allErrs
+}
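For reference, a minimal sketch of how ValidateEvent can be invoked, assuming the vendored import paths above; the Event literal is illustrative only.

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/validation"
    )

    func main() {
        // An event about a namespaced object must carry the same namespace as
        // the object it refers to, otherwise ValidateEvent reports an error.
        ev := &api.Event{
            ObjectMeta:     api.ObjectMeta{Name: "demo", Namespace: "default"},
            InvolvedObject: api.ObjectReference{Kind: "Pod", Namespace: "kube-system"},
        }
        if errs := validation.ValidateEvent(ev); len(errs) > 0 {
            fmt.Println(errs) // "involvedObject.namespace: ... does not match involvedObject"
        }
    }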
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/name.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/name.go
new file mode 100644
index 0000000..1358e6e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/name.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "strings"
+)
+
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+ for _, illegalName := range NameMayNotBe {
+ if name == illegalName {
+ return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+ }
+ }
+
+ for _, illegalContent := range NameMayNotContain {
+ if strings.Contains(name, illegalContent) {
+ return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)}
+ }
+ }
+
+ return nil
+}
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+ for _, illegalContent := range NameMayNotContain {
+ if strings.Contains(name, illegalContent) {
+ return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)}
+ }
+ }
+
+ return nil
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+ if prefix {
+ return IsValidPathSegmentPrefix(name)
+ } else {
+ return IsValidPathSegmentName(name)
+ }
+}
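A short usage sketch for the path-segment helpers above (import path as vendored here); the literal names are examples only.

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api/validation"
    )

    func main() {
        fmt.Println(validation.ValidatePathSegmentName("..", false))        // [may not be '..']
        fmt.Println(validation.ValidatePathSegmentName("foo%2Fbar", false)) // [may not contain '%']
        fmt.Println(validation.ValidatePathSegmentName("foo-", true))       // [] - a suffix could still make it valid
    }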
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go
new file mode 100644
index 0000000..f6a4d07
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go
@@ -0,0 +1,370 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/emicklei/go-restful/swagger"
+ "github.com/golang/glog"
+ apiutil "k8s.io/kubernetes/pkg/api/util"
+ "k8s.io/kubernetes/pkg/runtime"
+ utilerrors "k8s.io/kubernetes/pkg/util/errors"
+ "k8s.io/kubernetes/pkg/util/yaml"
+)
+
+type InvalidTypeError struct {
+ ExpectedKind reflect.Kind
+ ObservedKind reflect.Kind
+ FieldName string
+}
+
+func (i *InvalidTypeError) Error() string {
+ return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String())
+}
+
+func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error {
+ return &InvalidTypeError{expected, observed, fieldName}
+}
+
+// TypeNotFoundError is returned when the specified type
+// cannot be found in the schema.
+type TypeNotFoundError string
+
+func (tnfe TypeNotFoundError) Error() string {
+ return fmt.Sprintf("couldn't find type: %s", string(tnfe))
+}
+
+// Schema is an interface that knows how to validate an API object serialized to a byte array.
+type Schema interface {
+ ValidateBytes(data []byte) error
+}
+
+type NullSchema struct{}
+
+func (NullSchema) ValidateBytes(data []byte) error { return nil }
+
+type SwaggerSchema struct {
+ api swagger.ApiDeclaration
+ delegate Schema // For delegating to other api groups
+}
+
+func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) {
+ schema := &SwaggerSchema{}
+ err := json.Unmarshal(data, &schema.api)
+ if err != nil {
+ return nil, err
+ }
+ schema.delegate = factory
+ return schema, nil
+}
+
+// validateList unpacks a list and validates every item in the list.
+// It returns nil if every item is ok.
+// Otherwise it returns an error list containing the errors of every item.
+func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error {
+ items, exists := obj["items"]
+ if !exists {
+ return []error{fmt.Errorf("no items field in %#v", obj)}
+ }
+ return s.validateItems(items)
+}
+
+func (s *SwaggerSchema) validateItems(items interface{}) []error {
+ allErrs := []error{}
+ itemList, ok := items.([]interface{})
+ if !ok {
+ return append(allErrs, fmt.Errorf("items isn't a slice"))
+ }
+ for i, item := range itemList {
+ fields, ok := item.(map[string]interface{})
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i))
+ continue
+ }
+ groupVersion := fields["apiVersion"]
+ if groupVersion == nil {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i))
+ continue
+ }
+ itemVersion, ok := groupVersion.(string)
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i))
+ continue
+ }
+ if len(itemVersion) == 0 {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i))
+ }
+ kind := fields["kind"]
+ if kind == nil {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i))
+ continue
+ }
+ itemKind, ok := kind.(string)
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i))
+ continue
+ }
+ if len(itemKind) == 0 {
+ allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i))
+ }
+ version := apiutil.GetVersion(itemVersion)
+ errs := s.ValidateObject(item, "", version+"."+itemKind)
+ if len(errs) >= 1 {
+ allErrs = append(allErrs, errs...)
+ }
+ }
+
+ return allErrs
+}
+
+func (s *SwaggerSchema) ValidateBytes(data []byte) error {
+ var obj interface{}
+ out, err := yaml.ToJSON(data)
+ if err != nil {
+ return err
+ }
+ data = out
+ if err := json.Unmarshal(data, &obj); err != nil {
+ return err
+ }
+ fields, ok := obj.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("error in unmarshaling data %s", string(data))
+ }
+ groupVersion := fields["apiVersion"]
+ if groupVersion == nil {
+ return fmt.Errorf("apiVersion not set")
+ }
+ if _, ok := groupVersion.(string); !ok {
+ return fmt.Errorf("apiVersion isn't string type")
+ }
+ kind := fields["kind"]
+ if kind == nil {
+ return fmt.Errorf("kind not set")
+ }
+ if _, ok := kind.(string); !ok {
+ return fmt.Errorf("kind isn't string type")
+ }
+ if strings.HasSuffix(kind.(string), "List") {
+ return utilerrors.NewAggregate(s.validateList(fields))
+ }
+ version := apiutil.GetVersion(groupVersion.(string))
+ allErrs := s.ValidateObject(obj, "", version+"."+kind.(string))
+ if len(allErrs) == 1 {
+ return allErrs[0]
+ }
+ return utilerrors.NewAggregate(allErrs)
+}
+
+func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error {
+ allErrs := []error{}
+ models := s.api.Models
+ model, ok := models.At(typeName)
+
+ // Verify the api version matches. This is required for nested types with differing api versions because
+ // s.api only has schema for 1 api version (the parent object type's version).
+ // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1
+ // api to delegate to the schema for the /v1 api.
+ // Only do this for !ok objects so that cross ApiVersion vendored types take precedence.
+ if !ok && s.delegate != nil {
+ fields, mapOk := obj.(map[string]interface{})
+ if !mapOk {
+ return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj))
+ }
+ if delegated, err := s.delegateIfDifferentApiVersion(runtime.Unstructured{Object: fields}); delegated {
+ if err != nil {
+ allErrs = append(allErrs, err)
+ }
+ return allErrs
+ }
+ }
+
+ if !ok {
+ return append(allErrs, TypeNotFoundError(typeName))
+ }
+ properties := model.Properties
+ if len(properties.List) == 0 {
+ // The object does not have any sub-fields.
+ return nil
+ }
+ fields, ok := obj.(map[string]interface{})
+ if !ok {
+ return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj))
+ }
+ if len(fieldName) > 0 {
+ fieldName = fieldName + "."
+ }
+ // handle required fields
+ for _, requiredKey := range model.Required {
+ if _, ok := fields[requiredKey]; !ok {
+ allErrs = append(allErrs, fmt.Errorf("field %s: is required", requiredKey))
+ }
+ }
+ for key, value := range fields {
+ details, ok := properties.At(key)
+
+ // Special case for runtime.RawExtension and runtime.Objects because they always fail to validate
+ // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected
+ // super-type (RawExtension)
+ if s.isGenericArray(details) {
+ errs := s.validateItems(value)
+ if len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
+ continue
+ }
+ if !ok {
+ allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName))
+ continue
+ }
+ if details.Type == nil && details.Ref == nil {
+ allErrs = append(allErrs, fmt.Errorf("could not find the type of %s from object: %v", key, details))
+ }
+ var fieldType string
+ if details.Type != nil {
+ fieldType = *details.Type
+ } else {
+ fieldType = *details.Ref
+ }
+ if value == nil {
+ glog.V(2).Infof("Skipping nil field: %s", key)
+ continue
+ }
+ errs := s.validateField(value, fieldName+key, fieldType, &details)
+ if len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
+ }
+ return allErrs
+}
+
+// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the
+// current SwaggerSchema.
+// The first return value is true if the validation was delegated (to a different ApiGroup SwaggerSchema).
+// The second return value is the result of the delegated validation, if performed.
+func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj runtime.Unstructured) (bool, error) {
+ // Never delegate objects in the same ApiVersion or we will get infinite recursion
+ if !s.isDifferentApiVersion(obj) {
+ return false, nil
+ }
+
+ // Convert the object back into bytes so that we can pass it to the ValidateBytes function
+ m, err := json.Marshal(obj.Object)
+ if err != nil {
+ return true, err
+ }
+
+ // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup
+ return true, s.delegate.ValidateBytes(m)
+}
+
+// isDifferentApiVersion Returns true if obj lives in a different ApiVersion than the SwaggerSchema does.
+// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored.
+func (s *SwaggerSchema) isDifferentApiVersion(obj runtime.Unstructured) bool {
+ groupVersion := obj.GetAPIVersion()
+ return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion
+}
+
+// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object.
+func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool {
+ return p.DataTypeFields.Type != nil &&
+ *p.DataTypeFields.Type == "array" &&
+ p.Items != nil &&
+ p.Items.Ref != nil &&
+ (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object")
+}
+
+// This matches type name in the swagger spec, such as "v1.Binding".
+var versionRegexp = regexp.MustCompile(`^(v.+|unversioned)\..*`)
+
+func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error {
+ allErrs := []error{}
+ if reflect.TypeOf(value) == nil {
+ return append(allErrs, fmt.Errorf("unexpected nil value for field %v", fieldName))
+ }
+ // TODO: caesarxuchao: because we have multiple group/versions and objects
+ // may reference objects in other groups, the commented-out way of checking
+ // if a fieldType is a type defined by us is outdated. We use a hacky way
+ // for now.
+ // TODO: the type name in the swagger spec is something like "v1.Binding",
+ // and the "v1" is generated from the package name, not the groupVersion of
+ // the type. We need to fix go-restful to embed the group name in the type
+ // name, otherwise we couldn't handle identically named types in different
+ // groups correctly.
+ if versionRegexp.MatchString(fieldType) {
+ // if strings.HasPrefix(fieldType, apiVersion) {
+ return s.ValidateObject(value, fieldName, fieldType)
+ }
+ switch fieldType {
+ case "string":
+ // Be loose about what we accept for 'string' since we use IntOrString in a couple of places
+ _, isString := value.(string)
+ _, isNumber := value.(float64)
+ _, isInteger := value.(int)
+ if !isString && !isNumber && !isInteger {
+ return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ case "array":
+ arr, ok := value.([]interface{})
+ if !ok {
+ return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ var arrType string
+ if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil {
+ return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ if fieldDetails.Items.Ref != nil {
+ arrType = *fieldDetails.Items.Ref
+ } else {
+ arrType = *fieldDetails.Items.Type
+ }
+ for ix := range arr {
+ errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil)
+ if len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
+ }
+ case "uint64":
+ case "int64":
+ case "integer":
+ _, isNumber := value.(float64)
+ _, isInteger := value.(int)
+ if !isNumber && !isInteger {
+ return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ case "float64":
+ if _, ok := value.(float64); !ok {
+ return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ case "boolean":
+ if _, ok := value.(bool); !ok {
+ return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName))
+ }
+ // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`.
+ // We have both here so that kubectl can work with both old and new api servers.
+ case "object":
+ case "any":
+ default:
+ return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType))
+ }
+ return allErrs
+}
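Taken together, the schema helpers above are used roughly as follows; this is a sketch under the assumption that a swagger spec has already been fetched from an API server (for example from its /swaggerapi/api/v1 endpoint) and saved to a local file, whose name here is a placeholder.

    package main

    import (
        "fmt"
        "io/ioutil"

        "k8s.io/kubernetes/pkg/api/validation"
    )

    func main() {
        specBytes, err := ioutil.ReadFile("swagger-v1.json") // placeholder path
        if err != nil {
            panic(err)
        }
        // nil delegate: no fallback schema for other API groups.
        schema, err := validation.NewSwaggerSchemaFromBytes(specBytes, nil)
        if err != nil {
            panic(err)
        }
        manifest := []byte(`{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "demo"}}`)
        if err := schema.ValidateBytes(manifest); err != nil {
            fmt.Println("validation failed:", err)
        }
    }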
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go
new file mode 100644
index 0000000..3854004
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go
@@ -0,0 +1,3193 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/endpoints"
+ utilpod "k8s.io/kubernetes/pkg/api/pod"
+ "k8s.io/kubernetes/pkg/api/resource"
+ apiservice "k8s.io/kubernetes/pkg/api/service"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/capabilities"
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/util/intstr"
+ "k8s.io/kubernetes/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/util/validation"
+ "k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// TODO: delete this global variable when we enable the validation of common
+// fields by default.
+var RepairMalformedUpdates bool = true
+
+const isNegativeErrorMsg string = `must be greater than or equal to 0`
+const isInvalidQuotaResource string = `must be a standard resource for quota`
+const fieldImmutableErrorMsg string = `field is immutable`
+const isNotIntegerErrorMsg string = `must be an integer`
+
+var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255)
+
+const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
+
+// BannedOwners is a black list of object that are not allowed to be owners.
+var BannedOwners = map[unversioned.GroupVersionKind]struct{}{
+ v1.SchemeGroupVersion.WithKind("Event"): {},
+}
+
+// ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue
+func ValidateHasLabel(meta api.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
+ allErrs := field.ErrorList{}
+ actualValue, found := meta.Labels[key]
+ if !found {
+ allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key),
+ fmt.Sprintf("must be '%s'", expectedValue)))
+ return allErrs
+ }
+ if actualValue != expectedValue {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels,
+ fmt.Sprintf("must be '%s'", expectedValue)))
+ }
+ return allErrs
+}
+
+// ValidateAnnotations validates that a set of annotations are correctly defined.
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ var totalSize int64
+ for k, v := range annotations {
+ for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
+ }
+ totalSize += (int64)(len(k)) + (int64)(len(v))
+ }
+ if totalSize > (int64)(totalAnnotationSizeLimitB) {
+ allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB))
+ }
+ return allErrs
+}
+
+func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsDNS1123Label(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ return allErrs
+}
+
+func ValidatePodSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if annotations[api.AffinityAnnotationKey] != "" {
+ allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...)
+ }
+
+ if annotations[api.TolerationsAnnotationKey] != "" {
+ allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...)
+ }
+
+ // TODO: remove these after we EOL the annotations.
+ if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists {
+ allErrs = append(allErrs, ValidateDNS1123Label(hostname, fldPath.Key(utilpod.PodHostnameAnnotation))...)
+ }
+ if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists {
+ allErrs = append(allErrs, ValidateDNS1123Label(subdomain, fldPath.Key(utilpod.PodSubdomainAnnotation))...)
+ }
+
+ allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...)
+
+ return allErrs
+}
+
+func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // TODO: remove this after we EOL the annotation.
+ hostnamesMap, exists := annotations[endpoints.PodHostnamesAnnotation]
+ if exists && !isValidHostnamesMap(hostnamesMap) {
+ allErrs = append(allErrs, field.Invalid(fldPath, endpoints.PodHostnamesAnnotation,
+ `must be a valid json representation of map[string(IP)][HostRecord] e.g. "{"10.245.1.6":{"HostName":"my-webserver"}}"`))
+ }
+
+ return allErrs
+}
+
+func validateOwnerReference(ownerReference api.OwnerReference, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ gvk := unversioned.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)
+ // gvk.Group is empty for the legacy group.
+ if len(gvk.Version) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty"))
+ }
+ if len(gvk.Kind) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty"))
+ }
+ if len(ownerReference.Name) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty"))
+ }
+ if len(ownerReference.UID) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty"))
+ }
+ if _, ok := BannedOwners[gvk]; ok {
+ allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk)))
+ }
+ return allErrs
+}
+
+func ValidateOwnerReferences(ownerReferences []api.OwnerReference, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ controllerName := ""
+ for _, ref := range ownerReferences {
+ allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...)
+ if ref.Controller != nil && *ref.Controller {
+ if controllerName != "" {
+ allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences,
+ fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name)))
+ } else {
+ controllerName = ref.Name
+ }
+ }
+ }
+ return allErrs
+}
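+
+// Illustrative sketch (not part of the upstream file): at most one owner
+// reference may have Controller set to true; a second one is rejected, e.g.:
+//
+//	t := true
+//	refs := []api.OwnerReference{
+//		{APIVersion: "v1", Kind: "ReplicationController", Name: "rc-a", UID: "1", Controller: &t},
+//		{APIVersion: "v1", Kind: "ReplicationController", Name: "rc-b", UID: "2", Controller: &t},
+//	}
+//	ValidateOwnerReferences(refs, field.NewPath("metadata", "ownerReferences")) // non-empty ErrorList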
+
+// ValidateNameFunc validates that the provided name is valid for a given resource type.
+// Not all resources have the same validation rules for names. Prefix is true
+// if the name will have a value appended to it. If the name is not valid,
+// this returns a list of descriptions of individual characteristics of the
+// value that were not valid. Otherwise this returns an empty list or nil.
+type ValidateNameFunc func(name string, prefix bool) []string
+
+// maskTrailingDash replaces the final character of a string with a subdomain safe
+// value if it is a dash.
+func maskTrailingDash(name string) string {
+ if strings.HasSuffix(name, "-") {
+ return name[:len(name)-2] + "a"
+ }
+ return name
+}
+
+// ValidatePodName can be used to check whether the given pod name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidatePodName = NameIsDNSSubdomain
+
+// ValidateReplicationControllerName can be used to check whether the given replication
+// controller name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateReplicationControllerName = NameIsDNSSubdomain
+
+// ValidateServiceName can be used to check whether the given service name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceName = NameIsDNS952Label
+
+// ValidateNodeName can be used to check whether the given node name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNodeName = NameIsDNSSubdomain
+
+// ValidateNamespaceName can be used to check whether the given namespace name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNamespaceName = NameIsDNSLabel
+
+// ValidateLimitRangeName can be used to check whether the given limit range name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateLimitRangeName = NameIsDNSSubdomain
+
+// ValidateResourceQuotaName can be used to check whether the given
+// resource quota name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateResourceQuotaName = NameIsDNSSubdomain
+
+// ValidateSecretName can be used to check whether the given secret name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateSecretName = NameIsDNSSubdomain
+
+// ValidateServiceAccountName can be used to check whether the given service account name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceAccountName = NameIsDNSSubdomain
+
+// ValidateEndpointsName can be used to check whether the given endpoints name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateEndpointsName = NameIsDNSSubdomain
+
+// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
+func NameIsDNSSubdomain(name string, prefix bool) []string {
+ if prefix {
+ name = maskTrailingDash(name)
+ }
+ return validation.IsDNS1123Subdomain(name)
+}
+
+// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
+func NameIsDNSLabel(name string, prefix bool) []string {
+ if prefix {
+ name = maskTrailingDash(name)
+ }
+ return validation.IsDNS1123Label(name)
+}
+
+// NameIsDNS952Label is a ValidateNameFunc for names that must be a DNS 952 label.
+func NameIsDNS952Label(name string, prefix bool) []string {
+ if prefix {
+ name = maskTrailingDash(name)
+ }
+ return validation.IsDNS952Label(name)
+}
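+
+// Illustrative sketch (not part of the upstream file): the ValidateNameFunc
+// helpers above are called with prefix=true only when a generated suffix will
+// be appended to the name, e.g.:
+//
+//	NameIsDNSSubdomain("my-config", false) // empty: valid DNS subdomain
+//	NameIsDNS952Label("My_Service", false) // non-empty: uppercase and '_' are rejected
+//	NameIsDNSSubdomain("prefix-", true)    // empty: the trailing dash is masked for prefixes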
+
+// Validates that the given value is not negative.
+func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if value < 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, isNegativeErrorMsg))
+ }
+ return allErrs
+}
+
+// Validates that a Quantity is not negative
+func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if value.Cmp(resource.Quantity{}) < 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
+ }
+ return allErrs
+}
+
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if !api.Semantic.DeepEqual(oldVal, newVal) {
+ allErrs = append(allErrs, field.Invalid(fldPath, newVal, fieldImmutableErrorMsg))
+ }
+ return allErrs
+}
+
+// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
+// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate.
+func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(meta.GenerateName) != 0 {
+ for _, msg := range nameFn(meta.GenerateName, true) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, msg))
+ }
+ }
+ // If the generated name validates, but the calculated value does not, it's a problem with generation, and we
+ // report it here. This may confuse users, but indicates a programming bug and still must be validated.
+ // If there are multiple fields out of which one is required, then add an 'or' as a separator.
+ if len(meta.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
+ } else {
+ for _, msg := range nameFn(meta.Name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, msg))
+ }
+ }
+ if requiresNamespace {
+ if len(meta.Namespace) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
+ } else {
+ for _, msg := range ValidateNamespaceName(meta.Namespace, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, msg))
+ }
+ }
+ } else {
+ if len(meta.Namespace) != 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type"))
+ }
+ }
+ allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...)
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...)
+ allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...)
+ for _, finalizer := range meta.Finalizers {
+ allErrs = append(allErrs, validateFinalizerName(finalizer, fldPath.Child("finalizers"))...)
+ }
+ return allErrs
+}
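+
+// Illustrative sketch (not part of the upstream file): a typical call site
+// validates the metadata of a namespaced object rooted at the "metadata" path,
+// where pod is a hypothetical *api.Pod:
+//
+//	errs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName,
+//		field.NewPath("metadata"))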
+
+// ValidateObjectMetaUpdate validates an object's metadata when updated
+func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if !RepairMalformedUpdates && newMeta.UID != oldMeta.UID {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), newMeta.UID, "field is immutable"))
+ }
+ // in the event it is left empty, set it, to allow clients more flexibility
+ // TODO: remove the following code that repairs the update request when we retire the clients that modify the immutable fields.
+ // Please do not copy this pattern elsewhere; validation functions should not be modifying the objects they are passed!
+ if RepairMalformedUpdates {
+ if len(newMeta.UID) == 0 {
+ newMeta.UID = oldMeta.UID
+ }
+ // ignore changes to timestamp
+ if oldMeta.CreationTimestamp.IsZero() {
+ oldMeta.CreationTimestamp = newMeta.CreationTimestamp
+ } else {
+ newMeta.CreationTimestamp = oldMeta.CreationTimestamp
+ }
+ // an object can never remove a deletion timestamp or clear/change grace period seconds
+ if !oldMeta.DeletionTimestamp.IsZero() {
+ newMeta.DeletionTimestamp = oldMeta.DeletionTimestamp
+ }
+ if oldMeta.DeletionGracePeriodSeconds != nil && newMeta.DeletionGracePeriodSeconds == nil {
+ newMeta.DeletionGracePeriodSeconds = oldMeta.DeletionGracePeriodSeconds
+ }
+ }
+
+	// TODO: check whether newMeta == nil && oldMeta != nil once the repair logic is removed.
+ if newMeta.DeletionGracePeriodSeconds != nil && (oldMeta.DeletionGracePeriodSeconds == nil || *newMeta.DeletionGracePeriodSeconds != *oldMeta.DeletionGracePeriodSeconds) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionGracePeriodSeconds"), newMeta.DeletionGracePeriodSeconds, "field is immutable; may only be changed via deletion"))
+ }
+ if newMeta.DeletionTimestamp != nil && (oldMeta.DeletionTimestamp == nil || !newMeta.DeletionTimestamp.Equal(*oldMeta.DeletionTimestamp)) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionTimestamp"), newMeta.DeletionTimestamp, "field is immutable; may only be changed via deletion"))
+ }
+
+ // Reject updates that don't specify a resource version
+ if len(newMeta.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.ResourceVersion, "must be specified for an update"))
+ }
+
+ // Generation shouldn't be decremented
+ if newMeta.Generation < oldMeta.Generation {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.Generation, "must not be decremented"))
+ }
+
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.Name, oldMeta.Name, fldPath.Child("name"))...)
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.Namespace, oldMeta.Namespace, fldPath.Child("namespace"))...)
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.UID, oldMeta.UID, fldPath.Child("uid"))...)
+ allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...)
+
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...)
+ allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...)
+
+ return allErrs
+}
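+
+// Illustrative sketch (hypothetical helper): ValidateObjectMetaUpdate rejects
+// changes to immutable metadata such as the name; the rename below yields a
+// field.Invalid error.
+func exampleDetectImmutableNameChange(oldMeta api.ObjectMeta) field.ErrorList {
+	newMeta := oldMeta
+	newMeta.Name = oldMeta.Name + "-renamed" // immutable on update
+	return ValidateObjectMetaUpdate(&newMeta, &oldMeta, field.NewPath("metadata"))
+}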
+
+func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) {
+ allErrs := field.ErrorList{}
+
+ allNames := sets.String{}
+ for i, vol := range volumes {
+ idxPath := fldPath.Index(i)
+ namePath := idxPath.Child("name")
+ el := validateVolumeSource(&vol.VolumeSource, idxPath)
+ if len(vol.Name) == 0 {
+ el = append(el, field.Required(namePath, ""))
+ } else {
+ el = append(el, ValidateDNS1123Label(vol.Name, namePath)...)
+ }
+ if allNames.Has(vol.Name) {
+ el = append(el, field.Duplicate(namePath, vol.Name))
+ }
+ if len(el) == 0 {
+ allNames.Insert(vol.Name)
+ } else {
+ allErrs = append(allErrs, el...)
+ }
+ }
+ return allNames, allErrs
+}
+
+func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.ErrorList {
+ numVolumes := 0
+ allErrs := field.ErrorList{}
+ if source.EmptyDir != nil {
+ numVolumes++
+ // EmptyDirs have nothing to validate
+ }
+ if source.HostPath != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...)
+ }
+ }
+ if source.GitRepo != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...)
+ }
+ }
+ if source.GCEPersistentDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...)
+ }
+ }
+ if source.AWSElasticBlockStore != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
+ }
+ }
+ if source.Secret != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...)
+ }
+ }
+ if source.NFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...)
+ }
+ }
+ if source.ISCSI != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...)
+ }
+ }
+ if source.Glusterfs != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGlusterfs(source.Glusterfs, fldPath.Child("glusterfs"))...)
+ }
+ }
+ if source.Flocker != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...)
+ }
+ }
+ if source.PersistentVolumeClaim != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...)
+ }
+ }
+ if source.RBD != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...)
+ }
+ }
+ if source.Cinder != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...)
+ }
+ }
+ if source.CephFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...)
+ }
+ }
+ if source.DownwardAPI != nil {
+ if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...)
+ }
+ }
+ if source.FC != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...)
+ }
+ }
+ if source.FlexVolume != nil {
+ if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...)
+ }
+ }
+ if source.ConfigMap != nil {
+ if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
+ }
+ }
+	if source.AzureFile != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
+		}
+	}
+ if source.VsphereVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...)
+ }
+ }
+ if numVolumes == 0 {
+ allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
+ }
+
+ return allErrs
+}
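+
+// Illustrative sketch (hypothetical helper): validateVolumeSource requires
+// exactly one volume type. Setting two sources, as below, yields a
+// field.Forbidden error; setting none yields field.Required.
+func exampleVolumeSourceExclusivity() field.ErrorList {
+	src := api.VolumeSource{
+		EmptyDir: &api.EmptyDirVolumeSource{},
+		HostPath: &api.HostPathVolumeSource{Path: "/var/log"},
+	}
+	return validateVolumeSource(&src, field.NewPath("spec", "volumes").Index(0))
+}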
+
+func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(hostPath.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ return allErrs
+}
+
+func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(gitRepo.Repository) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("repository"), ""))
+ }
+
+ pathErrs := validateVolumeSourcePath(gitRepo.Directory, fldPath.Child("directory"))
+ allErrs = append(allErrs, pathErrs...)
+ return allErrs
+}
+
+func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(iscsi.TargetPortal) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), ""))
+ }
+ if len(iscsi.IQN) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), ""))
+ }
+ if iscsi.Lun < 0 || iscsi.Lun > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255)))
+ }
+ return allErrs
+}
+
+func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(fc.TargetWWNs) < 1 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), ""))
+ }
+
+ if fc.Lun == nil {
+ allErrs = append(allErrs, field.Required(fldPath.Child("lun"), ""))
+ } else {
+ if *fc.Lun < 0 || *fc.Lun > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255)))
+ }
+ }
+ return allErrs
+}
+
+func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(pd.PDName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), ""))
+ }
+ if pd.Partition < 0 || pd.Partition > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg))
+ }
+ return allErrs
+}
+
+func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(PD.VolumeID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
+ }
+ if PD.Partition < 0 || PD.Partition > 255 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg))
+ }
+ return allErrs
+}
+
+func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(secretSource.SecretName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
+ }
+ return allErrs
+}
+
+func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(configMapSource.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ }
+ return allErrs
+}
+
+func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(claim.ClaimName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), ""))
+ }
+ return allErrs
+}
+
+func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(nfs.Server) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("server"), ""))
+ }
+ if len(nfs.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ if !path.IsAbs(nfs.Path) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path"))
+ }
+ return allErrs
+}
+
+func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(glusterfs.EndpointsName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), ""))
+ }
+ if len(glusterfs.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ return allErrs
+}
+
+func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(flocker.DatasetName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("datasetName"), ""))
+ }
+ if strings.Contains(flocker.DatasetName, "/") {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'"))
+ }
+ return allErrs
+}
+
+var validDownwardAPIFieldPathExpressions = sets.NewString("metadata.name", "metadata.namespace", "metadata.labels", "metadata.annotations")
+
+func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, downwardAPIVolumeFile := range downwardAPIVolume.Items {
+ if len(downwardAPIVolumeFile.Path) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
+ }
+ allErrs = append(allErrs, validateVolumeSourcePath(downwardAPIVolumeFile.Path, fldPath.Child("path"))...)
+ if downwardAPIVolumeFile.FieldRef != nil {
+ allErrs = append(allErrs, validateObjectFieldSelector(downwardAPIVolumeFile.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
+ if downwardAPIVolumeFile.ResourceFieldRef != nil {
+				allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef cannot be specified simultaneously"))
+ }
+ } else if downwardAPIVolumeFile.ResourceFieldRef != nil {
+ allErrs = append(allErrs, validateContainerResourceFieldSelector(downwardAPIVolumeFile.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...)
+ } else {
+ allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
+ }
+ }
+ return allErrs
+}
+
+// This validation ensures that targetPath:
+// 1. is not an absolute path
+// 2. does not start with '../'
+// 3. does not contain '/../'
+// 4. does not end with '/..'
+func validateSubPath(targetPath string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if path.IsAbs(targetPath) {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path"))
+ }
+ if strings.HasPrefix(targetPath, "../") {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '../'"))
+ }
+ if strings.Contains(targetPath, "/../") {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '/../'"))
+ }
+ if strings.HasSuffix(targetPath, "/..") {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not end with '/..'"))
+ }
+ return allErrs
+}
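+
+// Illustrative examples of the subPath rules above (hypothetical helper):
+// "logs/app" passes, while "/logs", "../logs", "a/../b", and "a/.." are
+// rejected because they could escape the volume root.
+func exampleSubPathChecks() field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, p := range []string{"logs/app", "/logs", "../logs", "a/../b", "a/.."} {
+		allErrs = append(allErrs, validateSubPath(p, field.NewPath("subPath"))...)
+	}
+	return allErrs
+}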
+
+// This validation ensures that targetPath:
+// 1. is not an absolute path
+// 2. does not contain '..'
+// 3. does not start with '..'
+func validateVolumeSourcePath(targetPath string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if path.IsAbs(targetPath) {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path"))
+ }
+ // TODO assume OS of api server & nodes are the same for now
+ items := strings.Split(targetPath, string(os.PathSeparator))
+
+ for _, item := range items {
+ if item == ".." {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'"))
+ }
+ }
+ if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
+ allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'"))
+ }
+ return allErrs
+}
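+
+// Illustrative examples of the volume source path rules above (hypothetical
+// helper): "repo/dir" passes, while "/repo" and "repo/../other" are rejected.
+func exampleVolumeSourcePathChecks() field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, p := range []string{"repo/dir", "/repo", "repo/../other"} {
+		allErrs = append(allErrs, validateVolumeSourcePath(p, field.NewPath("directory"))...)
+	}
+	return allErrs
+}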
+
+func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(rbd.CephMonitors) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
+ }
+ if len(rbd.RBDImage) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
+ }
+ return allErrs
+}
+
+func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cd.VolumeID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
+ }
+ return allErrs
+}
+
+func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cephfs.Monitors) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
+ }
+ return allErrs
+}
+
+func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(fv.Driver) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
+ }
+ return allErrs
+}
+
+func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if azure.SecretName == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
+ }
+ if azure.ShareName == "" {
+ allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
+ }
+ return allErrs
+}
+
+func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cd.VolumePath) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), ""))
+ }
+ return allErrs
+}
+
+// ValidatePersistentVolumeName checks that a name is appropriate for a
+// PersistentVolume object.
+var ValidatePersistentVolumeName = NameIsDNSSubdomain
+
+var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany))
+
+func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
+ allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, field.NewPath("metadata"))
+
+ specPath := field.NewPath("spec")
+ if len(pv.Spec.AccessModes) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), ""))
+ }
+ for _, mode := range pv.Spec.AccessModes {
+ if !supportedAccessModes.Has(string(mode)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List()))
+ }
+ }
+
+ if len(pv.Spec.Capacity) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("capacity"), ""))
+ }
+
+ if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)}))
+ }
+ capPath := specPath.Child("capacity")
+ for r, qty := range pv.Spec.Capacity {
+ allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+ }
+
+ numVolumes := 0
+ if pv.Spec.HostPath != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...)
+ }
+ }
+ if pv.Spec.GCEPersistentDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...)
+ }
+ }
+ if pv.Spec.AWSElasticBlockStore != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...)
+ }
+ }
+ if pv.Spec.Glusterfs != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateGlusterfs(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...)
+ }
+ }
+ if pv.Spec.Flocker != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...)
+ }
+ }
+ if pv.Spec.NFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...)
+ }
+ }
+ if pv.Spec.RBD != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...)
+ }
+ }
+ if pv.Spec.CephFS != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCephFSVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...)
+ }
+ }
+ if pv.Spec.ISCSI != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...)
+ }
+ }
+ if pv.Spec.Cinder != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...)
+ }
+ }
+ if pv.Spec.FC != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...)
+ }
+ }
+	if pv.Spec.FlexVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
+		}
+	}
+	if pv.Spec.AzureFile != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...)
+		}
+	}
+ if pv.Spec.VsphereVolume != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...)
+ }
+ }
+ if numVolumes == 0 {
+ allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
+ }
+ return allErrs
+}
+
+// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make.
+// newPv is updated with fields that cannot be changed.
+func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = ValidatePersistentVolume(newPv)
+ newPv.Status = oldPv.Status
+ return allErrs
+}
+
+// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make.
+// newPv is updated with fields that cannot be changed.
+func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata"))
+ if len(newPv.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ newPv.Spec = oldPv.Spec
+ return allErrs
+}
+
+func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata"))
+ specPath := field.NewPath("spec")
+ if len(pvc.Spec.AccessModes) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "at least 1 accessMode is required"))
+ }
+ if pvc.Spec.Selector != nil {
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(pvc.Spec.Selector, specPath.Child("selector"))...)
+ }
+ for _, mode := range pvc.Spec.AccessModes {
+ if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List()))
+ }
+ }
+ if _, ok := pvc.Spec.Resources.Requests[api.ResourceStorage]; !ok {
+ allErrs = append(allErrs, field.Required(specPath.Child("resources").Key(string(api.ResourceStorage)), ""))
+ }
+ return allErrs
+}
+
+func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...)
+ // PVController needs to update PVC.Spec w/ VolumeName.
+ // Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
+ if len(oldPvc.Spec.VolumeName) == 0 {
+ // volumeName changes are allowed once.
+ // Reset back to empty string after equality check
+ oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName
+ defer func() { oldPvc.Spec.VolumeName = "" }()
+ }
+ // changes to Spec are not allowed, but updates to label/annotations are OK.
+ // no-op updates pass validation.
+ if !api.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation"))
+ }
+ newPvc.Status = oldPvc.Status
+ return allErrs
+}
+
+func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+ if len(newPvc.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ if len(newPvc.Spec.AccessModes) == 0 {
+		allErrs = append(allErrs, field.Required(field.NewPath("spec", "accessModes"), ""))
+ }
+ capPath := field.NewPath("status", "capacity")
+ for r, qty := range newPvc.Status.Capacity {
+ allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+ }
+ newPvc.Spec = oldPvc.Spec
+ return allErrs
+}
+
+var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP))
+
+func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ allNames := sets.String{}
+ for i, port := range ports {
+ idxPath := fldPath.Index(i)
+ if len(port.Name) > 0 {
+ if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 {
+				for _, msg := range msgs {
+					allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msg))
+ }
+ } else if allNames.Has(port.Name) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name))
+ } else {
+ allNames.Insert(port.Name)
+ }
+ }
+ if port.ContainerPort == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), ""))
+ } else {
+ for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg))
+ }
+ }
+ if port.HostPort != 0 {
+ for _, msg := range validation.IsValidPortNum(int(port.HostPort)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg))
+ }
+ }
+ if len(port.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), ""))
+ } else if !supportedPortProtocols.Has(string(port.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
+ }
+ }
+ return allErrs
+}
+
+func validateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for i, ev := range vars {
+ idxPath := fldPath.Index(i)
+ if len(ev.Name) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+ } else {
+ for _, msg := range validation.IsCIdentifier(ev.Name) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
+ }
+ }
+ allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...)
+ }
+ return allErrs
+}
+
+var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "status.podIP")
+var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory")
+
+func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if ev.ValueFrom == nil {
+ return allErrs
+ }
+
+ numSources := 0
+
+ if ev.ValueFrom.FieldRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...)
+ }
+ if ev.ValueFrom.ResourceFieldRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...)
+ }
+ if ev.ValueFrom.ConfigMapKeyRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...)
+ }
+ if ev.ValueFrom.SecretKeyRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...)
+ }
+
+ if len(ev.Value) != 0 {
+ if numSources != 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty"))
+ }
+ } else if numSources != 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
+ }
+
+ return allErrs
+}
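+
+// Illustrative sketch (hypothetical helper): an env var may carry either a
+// literal value or exactly one valueFrom source, never both; the combination
+// below therefore produces a field.Invalid error.
+func exampleEnvVarValueFromConflict() field.ErrorList {
+	ev := api.EnvVar{
+		Name:  "POD_NAME",
+		Value: "literal", // conflicts with the fieldRef below
+		ValueFrom: &api.EnvVarSource{
+			FieldRef: &api.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
+		},
+	}
+	return validateEnvVarValueFrom(ev, field.NewPath("valueFrom"))
+}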
+
+func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(fs.APIVersion) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), ""))
+ } else if len(fs.FieldPath) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), ""))
+ } else {
+ internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err)))
+ } else if !expressions.Has(internalFieldPath) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List()))
+ }
+ }
+
+ return allErrs
+}
+
+func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if volume && len(fs.ContainerName) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), ""))
+ } else if len(fs.Resource) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
+ } else if !expressions.Has(fs.Resource) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List()))
+ }
+ allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...)
+ return allErrs
+}
+
+var validContainerResourceDivisorForCPU = sets.NewString("1m", "1")
+var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
+
+func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ unsetDivisor := resource.Quantity{}
+ if unsetDivisor.Cmp(divisor) == 0 {
+ return allErrs
+ }
+ switch rName {
+ case "limits.cpu", "requests.cpu":
+ if !validContainerResourceDivisorForCPU.Has(divisor.String()) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource"))
+ }
+ case "limits.memory", "requests.memory":
+ if !validContainerResourceDivisorForMemory.Has(divisor.String()) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource"))
+ }
+ }
+ return allErrs
+}
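+
+// Illustrative sketch (hypothetical helper): a "1Mi" divisor is accepted for
+// memory resources, while CPU resources accept only "1" and "1m".
+func exampleDivisorCheck() field.ErrorList {
+	return validateContainerResourceDivisor("limits.memory", resource.MustParse("1Mi"), field.NewPath("resourceFieldRef"))
+}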
+
+func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(s.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ }
+ if len(s.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ } else {
+ for _, msg := range validation.IsConfigMapKey(s.Key) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+ }
+ }
+
+ return allErrs
+}
+
+func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(s.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ }
+ if len(s.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ } else {
+ for _, msg := range validation.IsConfigMapKey(s.Key) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+ }
+ }
+
+ return allErrs
+}
+
+func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ mountpoints := sets.NewString()
+
+ for i, mnt := range mounts {
+ idxPath := fldPath.Index(i)
+ if len(mnt.Name) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+ } else if !volumes.Has(mnt.Name) {
+ allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name))
+ }
+ if len(mnt.MountPath) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), ""))
+ } else if strings.Contains(mnt.MountPath, ":") {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not contain ':'"))
+ }
+ if mountpoints.Has(mnt.MountPath) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique"))
+ }
+ mountpoints.Insert(mnt.MountPath)
+ if len(mnt.SubPath) > 0 {
+ allErrs = append(allErrs, validateSubPath(mnt.SubPath, fldPath.Child("subPath"))...)
+ }
+ }
+ return allErrs
+}
+
+func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if probe == nil {
+ return allErrs
+ }
+ allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...)
+
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...)
+ return allErrs
+}
+
+// AccumulateUniqueHostPorts extracts each HostPort of each Container,
+// accumulating the results and returning an error if any ports conflict.
+func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for ci, ctr := range containers {
+ idxPath := fldPath.Index(ci)
+ portsPath := idxPath.Child("ports")
+ for pi := range ctr.Ports {
+ idxPath := portsPath.Index(pi)
+ port := ctr.Ports[pi].HostPort
+ if port == 0 {
+ continue
+ }
+ str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol)
+ if accumulator.Has(str) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str))
+ } else {
+ accumulator.Insert(str)
+ }
+ }
+ }
+ return allErrs
+}
+
+// checkHostPortConflicts checks for colliding Port.HostPort values across
+// a slice of containers.
+func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList {
+ allPorts := sets.String{}
+ return AccumulateUniqueHostPorts(containers, &allPorts, fldPath)
+}
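+
+// Illustrative sketch (hypothetical helper): host ports are keyed as
+// "port/protocol", so reusing 8080/TCP across containers, as below, is
+// reported as a field.Duplicate error.
+func exampleHostPortConflict() field.ErrorList {
+	containers := []api.Container{
+		{Name: "a", Ports: []api.ContainerPort{{HostPort: 8080, ContainerPort: 8080, Protocol: api.ProtocolTCP}}},
+		{Name: "b", Ports: []api.ContainerPort{{HostPort: 8080, ContainerPort: 8080, Protocol: api.ProtocolTCP}}},
+	}
+	return checkHostPortConflicts(containers, field.NewPath("spec", "containers"))
+}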
+
+func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if len(exec.Command) == 0 {
+ allErrors = append(allErrors, field.Required(fldPath.Child("command"), ""))
+ }
+ return allErrors
+}
+
+var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS))
+
+func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if len(http.Path) == 0 {
+ allErrors = append(allErrors, field.Required(fldPath.Child("path"), ""))
+ }
+ allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...)
+ if !supportedHTTPSchemes.Has(string(http.Scheme)) {
+ allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List()))
+ }
+ for _, header := range http.HTTPHeaders {
+ for _, msg := range validation.IsHTTPHeaderName(header.Name) {
+ allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg))
+ }
+ }
+ return allErrors
+}
+
+func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if port.Type == intstr.Int {
+ for _, msg := range validation.IsValidPortNum(port.IntValue()) {
+ allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg))
+ }
+ } else if port.Type == intstr.String {
+ for _, msg := range validation.IsValidPortName(port.StrVal) {
+ allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg))
+ }
+ } else {
+ allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type)))
+ }
+ return allErrs
+}
+
+func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList {
+ return ValidatePortNumOrName(tcp.Port, fldPath.Child("port"))
+}
+
+func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList {
+ numHandlers := 0
+ allErrors := field.ErrorList{}
+ if handler.Exec != nil {
+ if numHandlers > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type"))
+ } else {
+ numHandlers++
+ allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...)
+ }
+ }
+ if handler.HTTPGet != nil {
+ if numHandlers > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type"))
+ } else {
+ numHandlers++
+ allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...)
+ }
+ }
+ if handler.TCPSocket != nil {
+ if numHandlers > 0 {
+ allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type"))
+ } else {
+ numHandlers++
+ allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...)
+ }
+ }
+ if numHandlers == 0 {
+ allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type"))
+ }
+ return allErrors
+}
+
+func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if lifecycle.PostStart != nil {
+ allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...)
+ }
+ if lifecycle.PreStop != nil {
+ allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...)
+ }
+ return allErrs
+}
+
+var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever))
+
+func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+
+ switch policy {
+ case api.PullAlways, api.PullIfNotPresent, api.PullNever:
+ break
+ case "":
+ allErrors = append(allErrors, field.Required(fldPath, ""))
+ default:
+ allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List()))
+ }
+
+ return allErrors
+}
+
+func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+ if len(containers) > 0 {
+ allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...)
+ }
+
+ allNames := sets.String{}
+ for _, ctr := range otherContainers {
+ allNames.Insert(ctr.Name)
+ }
+ for i, ctr := range containers {
+ idxPath := fldPath.Index(i)
+ if allNames.Has(ctr.Name) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
+ }
+ if len(ctr.Name) > 0 {
+ allNames.Insert(ctr.Name)
+ }
+ if ctr.Lifecycle != nil {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers"))
+ }
+ if ctr.LivenessProbe != nil {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers"))
+ }
+ if ctr.ReadinessProbe != nil {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers"))
+ }
+ }
+ return allErrs
+}
+
+func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(containers) == 0 {
+ return append(allErrs, field.Required(fldPath, ""))
+ }
+
+ allNames := sets.String{}
+ for i, ctr := range containers {
+ idxPath := fldPath.Index(i)
+ namePath := idxPath.Child("name")
+ if len(ctr.Name) == 0 {
+ allErrs = append(allErrs, field.Required(namePath, ""))
+ } else {
+ allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...)
+ }
+ if allNames.Has(ctr.Name) {
+ allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name))
+ } else {
+ allNames.Insert(ctr.Name)
+ }
+ if len(ctr.Image) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("image"), ""))
+ }
+ if ctr.Lifecycle != nil {
+ allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...)
+ }
+ allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...)
+ // Liveness-specific validation
+ if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1"))
+ }
+
+ allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...)
+ allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...)
+ allErrs = append(allErrs, validateEnv(ctr.Env, idxPath.Child("env"))...)
+ allErrs = append(allErrs, validateVolumeMounts(ctr.VolumeMounts, volumes, idxPath.Child("volumeMounts"))...)
+ allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...)
+ allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...)
+ allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...)
+ }
+ // Check for colliding ports across all containers.
+ allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...)
+
+ return allErrs
+}
+
+func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ switch *restartPolicy {
+ case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever:
+ break
+ case "":
+ allErrors = append(allErrors, field.Required(fldPath, ""))
+ default:
+ validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)}
+ allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
+ }
+
+ return allErrors
+}
+
+func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ switch *dnsPolicy {
+ case api.DNSClusterFirst, api.DNSDefault:
+ break
+ case "":
+ allErrors = append(allErrors, field.Required(fldPath, ""))
+ default:
+ validValues := []string{string(api.DNSClusterFirst), string(api.DNSDefault)}
+		allErrors = append(allErrors, field.NotSupported(fldPath, *dnsPolicy, validValues))
+ }
+ return allErrors
+}
+
+func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ if hostNetwork {
+ for i, container := range containers {
+ portsPath := fldPath.Index(i).Child("ports")
+ for i, port := range container.Ports {
+ idxPath := portsPath.Index(i)
+ if port.HostPort != port.ContainerPort {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true"))
+ }
+ }
+ }
+ }
+ return allErrors
+}
+
+// validateImagePullSecrets checks to make sure the pull secrets are well
+// formed. Right now, we only expect name to be set (it's the only field). If
+// this ever changes and someone decides to set those fields, we'd like to
+// know.
+func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ for i, currPullSecret := range imagePullSecrets {
+ idxPath := fldPath.Index(i)
+ strippedRef := api.LocalObjectReference{Name: currPullSecret.Name}
+ if !reflect.DeepEqual(strippedRef, currPullSecret) {
+ allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set"))
+ }
+ }
+ return allErrors
+}
+
+func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
+ if !allowEmpty && len(*effect) == 0 {
+ return field.ErrorList{field.Required(fldPath, "")}
+ }
+
+ allErrors := field.ErrorList{}
+ switch *effect {
+	// TODO: Replace the next line with the commented-out case below once TaintEffectNoScheduleNoAdmit and TaintEffectNoScheduleNoAdmitNoExecute are implemented.
+ case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule:
+ // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoScheduleNoAdmitNoExecute:
+ default:
+ validValues := []string{
+ string(api.TaintEffectNoSchedule),
+ string(api.TaintEffectPreferNoSchedule),
+			// TODO: Uncomment this block once TaintEffectNoScheduleNoAdmit and TaintEffectNoScheduleNoAdmitNoExecute are implemented.
+ // string(api.TaintEffectNoScheduleNoAdmit),
+ // string(api.TaintEffectNoScheduleNoAdmitNoExecute),
+ }
+		allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
+ }
+ return allErrors
+}
+
+// validateTolerations tests if given tolerations have valid data.
+func validateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ for i, toleration := range tolerations {
+ idxPath := fldPath.Index(i)
+ // validate the toleration key
+ allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...)
+
+ // validate toleration operator and value
+ switch toleration.Operator {
+ case api.TolerationOpEqual, "":
+ if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";")))
+ }
+ case api.TolerationOpExists:
+ if len(toleration.Value) > 0 {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'"))
+ }
+ default:
+ validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)}
+ allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues))
+ }
+
+ // validate toleration effect
+ if len(toleration.Effect) > 0 {
+ allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...)
+ }
+ }
+ return allErrors
+}
+
+// ValidatePod tests if required fields in the pod are set.
+func ValidatePod(pod *api.Pod) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+// ValidatePodSpec tests that the specified PodSpec has valid data.
+// This includes checking formatting and uniqueness. It also canonicalizes the
+// structure by setting default values and implementing any backwards-compatibility
+// tricks.
+func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes"))
+ allErrs = append(allErrs, vErrs...)
+ allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...)
+ allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...)
+ allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
+ allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
+ allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
+ allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
+ if len(spec.ServiceAccountName) > 0 {
+ for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg))
+ }
+ }
+
+ if len(spec.NodeName) > 0 {
+ for _, msg := range ValidateNodeName(spec.NodeName, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg))
+ }
+ }
+
+ if spec.ActiveDeadlineSeconds != nil {
+ if *spec.ActiveDeadlineSeconds <= 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), spec.ActiveDeadlineSeconds, "must be greater than 0"))
+ }
+ }
+
+ if len(spec.Hostname) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...)
+ }
+
+ if len(spec.Subdomain) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...)
+ }
+
+ return allErrs
+}
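+
+// Illustrative sketch (hypothetical helper): a minimal pod spec that passes
+// ValidatePodSpec; the container name and image are placeholders.
+func exampleValidateMinimalPodSpec() field.ErrorList {
+	spec := api.PodSpec{
+		Containers: []api.Container{
+			{Name: "web", Image: "nginx", ImagePullPolicy: api.PullIfNotPresent},
+		},
+		RestartPolicy: api.RestartPolicyAlways,
+		DNSPolicy:     api.DNSClusterFirst,
+	}
+	return ValidatePodSpec(&spec, field.NewPath("spec"))
+}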
+
+// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields have valid data
+func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ switch rq.Operator {
+ case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn:
+ if len(rq.Values) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+ }
+ case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist:
+ if len(rq.Values) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+ }
+
+ case api.NodeSelectorOpGt, api.NodeSelectorOpLt:
+ if len(rq.Values) != 1 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must specify a single value when `operator` is 'Lt' or 'Gt'"))
+ }
+ default:
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
+ }
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
+ return allErrs
+}
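+
+// Illustrative sketch (hypothetical helper) of the operator rules above:
+// 'In'/'NotIn' require at least one value, 'Exists'/'DoesNotExist' forbid
+// values, and 'Gt'/'Lt' take exactly one value.
+func exampleNodeSelectorRequirement() field.ErrorList {
+	req := api.NodeSelectorRequirement{
+		Key:      "kubernetes.io/hostname",
+		Operator: api.NodeSelectorOpIn,
+		Values:   []string{"node-1"},
+	}
+	return ValidateNodeSelectorRequirement(req, field.NewPath("matchExpressions").Index(0))
+}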
+
+// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
+func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(term.MatchExpressions) == 0 {
+ return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement"))
+ }
+ for j, req := range term.MatchExpressions {
+ allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...)
+ }
+ return allErrs
+}
+
+// ValidateNodeSelector tests that the specified nodeSelector fields have valid data
+func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ termFldPath := fldPath.Child("nodeSelectorTerms")
+ if len(nodeSelector.NodeSelectorTerms) == 0 {
+ return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
+ }
+
+ for i, term := range nodeSelector.NodeSelectorTerms {
+ allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...)
+ }
+
+ return allErrs
+}
+
+// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields have valid data
+func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for i, term := range terms {
+ if term.Weight <= 0 || term.Weight > 100 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100"))
+ }
+
+ allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...)
+ }
+ return allErrs
+}
+
+// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
+func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...)
+ for _, name := range podAffinityTerm.Namespaces {
+ for _, msg := range ValidateNamespaceName(name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
+ }
+ }
+ if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti-affinity"))
+ }
+ if len(podAffinityTerm.TopologyKey) != 0 {
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...)
+ }
+ return allErrs
+}
+
+// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
+func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, podAffinityTerm := range podAffinityTerms {
+ allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...)
+ }
+ return allErrs
+}
+
+// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
+func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for j, weightedTerm := range weightedPodAffinityTerms {
+ if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
+ }
+ allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...)
+ }
+ return allErrs
+}
+
+// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
+func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+	// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+ // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+ // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
+ // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+ //}
+ if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is not allowed for hard pod anti-affinity
+ allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
+ fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is allowed for soft pod anti-affinity
+ allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true,
+ fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ return allErrs
+}
+
+// validatePodAffinity tests that the specified podAffinity fields have valid data
+func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+	// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+ // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+ // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
+ // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+ //}
+ if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is not allowed for hard pod affinity
+ allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false,
+ fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+ // empty topologyKey is not allowed for soft pod affinity
+ allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false,
+ fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ return allErrs
+}
+
+// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data
+func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ affinity, err := api.GetAffinityFromPodAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error()))
+ return allErrs
+ }
+
+ affinityFldPath := fldPath.Child(api.AffinityAnnotationKey)
+ if affinity.NodeAffinity != nil {
+ na := affinity.NodeAffinity
+ naFldPath := affinityFldPath.Child("nodeAffinity")
+ // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
+ // if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
+ // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+ // }
+
+ if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+
+ if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+ allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+ }
+ }
+ if affinity.PodAffinity != nil {
+ allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...)
+ }
+ if affinity.PodAntiAffinity != nil {
+ allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...)
+ }
+
+ return allErrs
+}
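+
+// Illustrative usage sketch: when the affinity annotation cannot be deserialized,
+// the failure surfaces as a single Invalid error keyed to api.AffinityAnnotationKey.
+// The annotation value below is deliberately malformed and purely hypothetical.
+//
+//	annotations := map[string]string{api.AffinityAnnotationKey: "{not valid json"}
+//	errs := ValidateAffinityInPodAnnotations(annotations, field.NewPath("metadata", "annotations"))
+//	// expect one Invalid error; a well-formed annotation instead flows through
+//	// ValidateNodeSelector, validatePodAffinity, and validatePodAntiAffinity above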
+
+// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations have valid data
+func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ tolerations, err := api.GetTolerationsFromPodAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error()))
+ return allErrs
+ }
+ if len(tolerations) > 0 {
+ allErrs = append(allErrs, validateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...)
+ }
+
+ return allErrs
+}
+
+func validateSeccompProfile(p string, fldPath *field.Path) field.ErrorList {
+ if p == "docker/default" {
+ return nil
+ }
+ if p == "unconfined" {
+ return nil
+ }
+ if strings.HasPrefix(p, "localhost/") {
+ return validateSubPath(strings.TrimPrefix(p, "localhost/"), fldPath)
+ }
+ return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")}
+}
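+
+// Illustrative usage sketch: per the checks above, the accepted seccomp profile
+// forms are "docker/default", "unconfined", and "localhost/<relative path>";
+// anything else yields an Invalid error. The field path below is hypothetical.
+//
+//	p := field.NewPath("annotations", api.SeccompPodAnnotationKey)
+//	_ = validateSeccompProfile("docker/default", p)            // nil
+//	_ = validateSeccompProfile("localhost/my-profile.json", p) // validated as a sub path
+//	_ = validateSeccompProfile("custom", p)                    // one Invalid error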
+
+func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if p, exists := annotations[api.SeccompPodAnnotationKey]; exists {
+ allErrs = append(allErrs, validateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...)
+ }
+ for k, p := range annotations {
+ if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) {
+ allErrs = append(allErrs, validateSeccompProfile(p, fldPath.Child(k))...)
+ }
+ }
+
+ return allErrs
+}
+
+// ValidatePodSecurityContext tests that the specified PodSecurityContext has valid data.
+func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if securityContext != nil {
+ allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...)
+ if securityContext.FSGroup != nil {
+ for _, msg := range validation.IsValidGroupId(*securityContext.FSGroup) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
+ }
+ }
+ if securityContext.RunAsUser != nil {
+ for _, msg := range validation.IsValidUserId(*securityContext.RunAsUser) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg))
+ }
+ }
+ for g, gid := range securityContext.SupplementalGroups {
+ for _, msg := range validation.IsValidGroupId(gid) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
+ }
+ }
+ }
+
+ return allErrs
+}
+
+// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotations(newPod.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+ specPath := field.NewPath("spec")
+ if len(newPod.Spec.Containers) != len(oldPod.Spec.Containers) {
+ //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("containers"), "pod updates may not add or remove containers"))
+ return allErrs
+ }
+
+ // validate updateable fields:
+ // 1. containers[*].image
+ // 2. spec.activeDeadlineSeconds
+
+ // validate updated container images
+ for i, ctr := range newPod.Spec.Containers {
+ if len(ctr.Image) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("containers").Index(i).Child("image"), ""))
+ }
+ }
+
+ // validate updated spec.activeDeadlineSeconds. two types of updates are allowed:
+ // 1. from nil to a positive value
+ // 2. from a positive value to a lesser, non-negative value
+ if newPod.Spec.ActiveDeadlineSeconds != nil {
+ newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds
+ if newActiveDeadlineSeconds < 0 {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, isNegativeErrorMsg))
+ return allErrs
+ }
+ if oldPod.Spec.ActiveDeadlineSeconds != nil {
+ oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
+ if oldActiveDeadlineSeconds < newActiveDeadlineSeconds {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value"))
+ return allErrs
+ }
+ }
+ } else if oldPod.Spec.ActiveDeadlineSeconds != nil {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value"))
+ }
+
+ // handle updateable fields by munging those fields prior to deep equal comparison.
+ mungedPod := *newPod
+ // munge containers[*].image
+ var newContainers []api.Container
+ for ix, container := range mungedPod.Spec.Containers {
+ container.Image = oldPod.Spec.Containers[ix].Image
+ newContainers = append(newContainers, container)
+ }
+ mungedPod.Spec.Containers = newContainers
+ // munge spec.activeDeadlineSeconds
+ mungedPod.Spec.ActiveDeadlineSeconds = nil
+ if oldPod.Spec.ActiveDeadlineSeconds != nil {
+ activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
+ mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
+ }
+ if !api.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) {
+ //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
+ allErrs = append(allErrs, field.Forbidden(specPath, "pod updates may not change fields other than `containers[*].image` or `spec.activeDeadlineSeconds`"))
+ }
+
+ return allErrs
+}
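+
+// Illustrative usage sketch: only container images and activeDeadlineSeconds are
+// treated as updatable, so an image-only change to a hypothetical oldPod should
+// validate cleanly, while touching any other spec field trips the deep-equal
+// check above.
+//
+//	updated := *oldPod
+//	updated.Spec.Containers = append([]api.Container(nil), oldPod.Spec.Containers...)
+//	updated.Spec.Containers[0].Image = "nginx:1.9"
+//	errs := ValidatePodUpdate(&updated, oldPod)
+//	// expect len(errs) == 0 for an image-only change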
+
+// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata"))
+
+ // TODO: allow change when bindings are properly decoupled from pods
+ if newPod.Spec.NodeName != oldPod.Spec.NodeName {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly"))
+ }
+
+ // For status update we ignore changes to pod spec.
+ newPod.Spec = oldPod.Spec
+
+ return allErrs
+}
+
+// ValidatePodBinding tests if required fields in the pod binding are legal.
+func ValidatePodBinding(binding *api.Binding) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" {
+ // TODO: When validation becomes versioned, this gets more complicated.
+ allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", "<empty>"}))
+ }
+ if len(binding.Target.Name) == 0 {
+ // TODO: When validation becomes versioned, this gets more complicated.
+ allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), ""))
+ }
+
+ return allErrs
+}
+
+// ValidatePodTemplate tests if required fields in the pod template are set.
+func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList {
+ allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...)
+ return allErrs
+}
+
+// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
+// that cannot be changed.
+func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...)
+ return allErrs
+}
+
+var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone))
+var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort),
+ string(api.ServiceTypeLoadBalancer))
+
+// ValidateService tests if required fields in the service are set.
+func ValidateService(service *api.Service) field.ErrorList {
+ allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata"))
+
+ specPath := field.NewPath("spec")
+ if len(service.Spec.Ports) == 0 && service.Spec.ClusterIP != api.ClusterIPNone {
+ allErrs = append(allErrs, field.Required(specPath.Child("ports"), ""))
+ }
+ if service.Spec.Type == api.ServiceTypeLoadBalancer {
+ for ix := range service.Spec.Ports {
+ port := &service.Spec.Ports[ix]
+ // This is a workaround for broken cloud environments that
+ // over-open firewalls. Hopefully it can go away when more clouds
+ // understand containers better.
+ if port.Port == 10250 {
+ portPath := specPath.Child("ports").Index(ix)
+ allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet"))
+ }
+ }
+ }
+
+ isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone
+ allPortNames := sets.String{}
+ portsPath := specPath.Child("ports")
+ for i := range service.Spec.Ports {
+ portPath := portsPath.Index(i)
+ allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...)
+ }
+
+ if service.Spec.Selector != nil {
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
+ }
+
+ if len(service.Spec.SessionAffinity) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), ""))
+ } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List()))
+ }
+
+ if api.IsServiceIPSet(service) {
+ if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil {
+ allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address"))
+ }
+ }
+
+ ipPath := specPath.Child("externalIPs")
+ for i, ip := range service.Spec.ExternalIPs {
+ idxPath := ipPath.Index(i)
+ if msgs := validation.IsValidIP(ip); len(msgs) != 0 {
+ for i := range msgs {
+ allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i]))
+ }
+ } else {
+ allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...)
+ }
+ }
+
+ if len(service.Spec.Type) == 0 {
+ allErrs = append(allErrs, field.Required(specPath.Child("type"), ""))
+ } else if !supportedServiceType.Has(string(service.Spec.Type)) {
+ allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List()))
+ }
+
+ if service.Spec.Type == api.ServiceTypeLoadBalancer {
+ portsPath := specPath.Child("ports")
+ includeProtocols := sets.NewString()
+ for i := range service.Spec.Ports {
+ portPath := portsPath.Index(i)
+ if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) {
+ allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports"))
+ } else {
+ includeProtocols.Insert(string(service.Spec.Ports[i].Protocol))
+ }
+ }
+ if includeProtocols.Len() > 1 {
+			allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mixed protocols"))
+ }
+ }
+
+ if service.Spec.Type == api.ServiceTypeClusterIP {
+ portsPath := specPath.Child("ports")
+ for i := range service.Spec.Ports {
+ portPath := portsPath.Index(i)
+ if service.Spec.Ports[i].NodePort != 0 {
+ allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'"))
+ }
+ }
+ }
+
+ // Check for duplicate NodePorts, considering (protocol,port) pairs
+ portsPath = specPath.Child("ports")
+ nodePorts := make(map[api.ServicePort]bool)
+ for i := range service.Spec.Ports {
+ port := &service.Spec.Ports[i]
+ if port.NodePort == 0 {
+ continue
+ }
+ portPath := portsPath.Index(i)
+ var key api.ServicePort
+ key.Protocol = port.Protocol
+ key.NodePort = port.NodePort
+ _, found := nodePorts[key]
+ if found {
+ allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort))
+ }
+ nodePorts[key] = true
+ }
+
+ // Validate SourceRange field and annotation
+ _, ok := service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey]
+ if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok {
+ var fieldPath *field.Path
+ var val string
+ if len(service.Spec.LoadBalancerSourceRanges) > 0 {
+			fieldPath = specPath.Child("loadBalancerSourceRanges")
+ val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges)
+ } else {
+ fieldPath = field.NewPath("metadata", "annotations").Key(apiservice.AnnotationLoadBalancerSourceRangesKey)
+ val = service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey]
+ }
+ if service.Spec.Type != api.ServiceTypeLoadBalancer {
+ allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'"))
+ }
+ _, err := apiservice.GetLoadBalancerSourceRanges(service)
+ if err != nil {
+			allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24"))
+ }
+ }
+ return allErrs
+}
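+
+// Illustrative usage sketch: a minimal ClusterIP service needs metadata, at least
+// one fully specified port, a session affinity, and a type. The literals below
+// are hypothetical, and defaulting normally performed by the versioned API is
+// written out by hand here.
+//
+//	svc := &api.Service{
+//		ObjectMeta: api.ObjectMeta{Name: "demo", Namespace: "default"},
+//		Spec: api.ServiceSpec{
+//			Ports:           []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(80)}},
+//			SessionAffinity: api.ServiceAffinityNone,
+//			Type:            api.ServiceTypeClusterIP,
+//		},
+//	}
+//	errs := ValidateService(svc)
+//	// expect len(errs) == 0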
+
+func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if requireName && len(sp.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ } else if len(sp.Name) != 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...)
+ if allNames.Has(sp.Name) {
+ allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name))
+ } else {
+ allNames.Insert(sp.Name)
+ }
+ }
+
+ for _, msg := range validation.IsValidPortNum(int(sp.Port)) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg))
+ }
+
+ if len(sp.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
+ } else if !supportedPortProtocols.Has(string(sp.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List()))
+ }
+
+ allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...)
+
+ // in the v1 API, targetPorts on headless services were tolerated.
+ // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility.
+ //
+ // if isHeadlessService {
+ // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) {
+ // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None"))
+ // }
+ // }
+
+ return allErrs
+}
+
+// ValidateServiceUpdate tests if required fields in the service are set during an update
+func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
+
+ if api.IsServiceIPSet(oldService) {
+ allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...)
+ }
+
+ allErrs = append(allErrs, ValidateService(service)...)
+ return allErrs
+}
+
+// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status.
+func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
+ return allErrs
+}
+
+// ValidateReplicationController tests if required fields in the replication controller are set.
+func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList {
+ allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
+func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set.
+func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
+ statusPath := field.NewPath("status")
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.Replicas), statusPath.Child("replicas"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ObservedGeneration), statusPath.Child("observedGeneration"))...)
+ return allErrs
+}
+
+// Validates that the given selector is non-empty.
+func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ selector := labels.Set(selectorMap).AsSelector()
+ if selector.Empty() {
+ allErrs = append(allErrs, field.Required(fldPath, ""))
+ }
+ return allErrs
+}
+
+// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
+func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if template == nil {
+ allErrs = append(allErrs, field.Required(fldPath, ""))
+ } else {
+ selector := labels.Set(selectorMap).AsSelector()
+ if !selector.Empty() {
+ // Verify that the RC selector matches the labels in template.
+ labels := labels.Set(template.Labels)
+ if !selector.Matches(labels) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
+ }
+ }
+ allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...)
+ if replicas > 1 {
+ allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
+ }
+ // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
+ if template.Spec.RestartPolicy != api.RestartPolicyAlways {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
+ }
+ }
+ return allErrs
+}
+
+// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
+func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
+ allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
+ allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...)
+ return allErrs
+}
+
+// ValidatePodTemplateSpec validates the spec of a pod template
+func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
+ allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...)
+ return allErrs
+}
+
+func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i := range volumes {
+ vol := &volumes[i]
+ idxPath := fldPath.Index(i)
+ if vol.GCEPersistentDisk != nil {
+			if !vol.GCEPersistentDisk.ReadOnly {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
+ }
+ }
+ // TODO: What to do for AWS? It doesn't support replicas
+ }
+ return allErrs
+}
+
+// validateTaints tests if given taints have valid data.
+func validateTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList {
+ allErrors := field.ErrorList{}
+ for i, currTaint := range taints {
+ idxPath := fldPath.Index(i)
+ // validate the taint key
+ allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...)
+ // validate the taint value
+ if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 {
+ allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";")))
+ }
+ // validate the taint effect
+ allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...)
+ }
+ return allErrors
+}
+
+// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations have valid data
+func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ taints, err := api.GetTaintsFromNodeAnnotations(annotations)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error()))
+ return allErrs
+ }
+ if len(taints) > 0 {
+ allErrs = append(allErrs, validateTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...)
+ }
+
+ return allErrs
+}
+
+func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+ if annotations[api.TaintsAnnotationKey] != "" {
+ return ValidateTaintsInNodeAnnotations(annotations, fldPath)
+ }
+ return field.ErrorList{}
+}
+
+// ValidateNode tests if required fields in the node are set.
+func ValidateNode(node *api.Node) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
+ allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+
+ // Only validate spec. All status fields are optional and can be updated later.
+
+ // external ID is required.
+ if len(node.Spec.ExternalID) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), ""))
+ }
+
+	// TODO(rjnagal): Ignore PodCIDR until it is completely implemented.
+ return allErrs
+}
+
+// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode.
+func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList {
+ fldPath := field.NewPath("metadata")
+ allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
+ allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+
+ // TODO: Enable the code once we have better api object.status update model. Currently,
+ // anyone can update node status.
+ // if !api.Semantic.DeepEqual(node.Status, api.NodeStatus{}) {
+ // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty"))
+ // }
+
+	// Validate that there are no duplicate addresses in node status.
+ addresses := make(map[api.NodeAddress]bool)
+ for i, address := range node.Status.Addresses {
+ if _, ok := addresses[address]; ok {
+ allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address))
+ }
+ addresses[address] = true
+ }
+
+ if len(oldNode.Spec.PodCIDR) == 0 {
+ // Allow the controller manager to assign a CIDR to a node if it doesn't have one.
+ oldNode.Spec.PodCIDR = node.Spec.PodCIDR
+ } else {
+ if oldNode.Spec.PodCIDR != node.Spec.PodCIDR {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to a valid value"))
+ }
+ }
+ // TODO: move reset function to its own location
+ // Ignore metadata changes now that they have been tested
+ oldNode.ObjectMeta = node.ObjectMeta
+ // Allow users to update capacity
+ oldNode.Status.Capacity = node.Status.Capacity
+ // Allow users to unschedule node
+ oldNode.Spec.Unschedulable = node.Spec.Unschedulable
+ // Clear status
+ oldNode.Status = node.Status
+
+	// TODO: Add a 'real' error type for this error and print the actual diffs.
+ if !api.Semantic.DeepEqual(oldNode, node) {
+ glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node)
+ allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels or capacity"))
+ }
+
+ return allErrs
+}
+
+// Validate compute resource type name.
+// Refer to docs/design/resources.md for more details.
+func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ if len(allErrs) != 0 {
+ return allErrs
+ }
+
+ if len(strings.Split(value, "/")) == 1 {
+ if !api.IsStandardResourceName(value) {
+ return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
+ }
+ }
+
+ return field.ErrorList{}
+}
+
+// Validate container resource name
+// Refer to docs/design/resources.md for more details.
+func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := validateResourceName(value, fldPath)
+ if len(strings.Split(value, "/")) == 1 {
+ if !api.IsStandardContainerResourceName(value) {
+ return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
+ }
+ }
+	return allErrs
+}
+
+// Validate resource names that can go in a resource quota
+// Refer to docs/design/resources.md for more details.
+func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := validateResourceName(value, fldPath)
+ if len(strings.Split(value, "/")) == 1 {
+ if !api.IsStandardQuotaResourceName(value) {
+ return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource))
+ }
+ }
+	return allErrs
+}
+
+// Validate limit range types
+func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
+ }
+ if len(allErrs) != 0 {
+ return allErrs
+ }
+
+ if len(strings.Split(value, "/")) == 1 {
+ if !api.IsStandardLimitRangeType(value) {
+ return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified"))
+ }
+ }
+
+ return allErrs
+}
+
+// Validate limit range resource name
+// limit types (other than Pod/Container) may include storage, not just cpu or memory
+func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList {
+ switch limitType {
+ case api.LimitTypePod, api.LimitTypeContainer:
+ return validateContainerResourceName(value, fldPath)
+ default:
+ return validateResourceName(value, fldPath)
+ }
+}
+
+// ValidateLimitRange tests if required fields in the LimitRange are set.
+func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList {
+ allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata"))
+
+ // ensure resource names are properly qualified per docs/design/resources.md
+ limitTypeSet := map[api.LimitType]bool{}
+ fldPath := field.NewPath("spec", "limits")
+ for i := range limitRange.Spec.Limits {
+ idxPath := fldPath.Index(i)
+ limit := &limitRange.Spec.Limits[i]
+ allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...)
+
+ _, found := limitTypeSet[limit.Type]
+ if found {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type))
+ }
+ limitTypeSet[limit.Type] = true
+
+ keys := sets.String{}
+ min := map[string]resource.Quantity{}
+ max := map[string]resource.Quantity{}
+ defaults := map[string]resource.Quantity{}
+ defaultRequests := map[string]resource.Quantity{}
+ maxLimitRequestRatios := map[string]resource.Quantity{}
+
+ for k, q := range limit.Max {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...)
+ keys.Insert(string(k))
+ max[string(k)] = q
+ }
+ for k, q := range limit.Min {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...)
+ keys.Insert(string(k))
+ min[string(k)] = q
+ }
+
+ if limit.Type == api.LimitTypePod {
+ if len(limit.Default) > 0 {
+ allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'"))
+ }
+ if len(limit.DefaultRequest) > 0 {
+ allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'"))
+ }
+ } else {
+ for k, q := range limit.Default {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...)
+ keys.Insert(string(k))
+ defaults[string(k)] = q
+ }
+ for k, q := range limit.DefaultRequest {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...)
+ keys.Insert(string(k))
+ defaultRequests[string(k)] = q
+ }
+ }
+
+ for k, q := range limit.MaxLimitRequestRatio {
+ allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
+ keys.Insert(string(k))
+ maxLimitRequestRatios[string(k)] = q
+ }
+
+ for k := range keys {
+ minQuantity, minQuantityFound := min[k]
+ maxQuantity, maxQuantityFound := max[k]
+ defaultQuantity, defaultQuantityFound := defaults[k]
+ defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
+ maxRatio, maxRatioFound := maxLimitRequestRatios[k]
+
+ if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
+ }
+
+ if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
+ }
+
+ if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
+ }
+
+ if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
+ }
+
+ if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
+ }
+
+ if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
+ }
+ if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String())))
+ }
+ if maxRatioFound && minQuantityFound && maxQuantityFound {
+ maxRatioValue := float64(maxRatio.Value())
+ minQuantityValue := minQuantity.Value()
+ maxQuantityValue := maxQuantity.Value()
+ if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue {
+ maxRatioValue = float64(maxRatio.MilliValue()) / 1000
+ minQuantityValue = minQuantity.MilliValue()
+ maxQuantityValue = maxQuantity.MilliValue()
+ }
+ maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue)
+ if maxRatioValue > maxRatioLimit {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit)))
+ }
+ }
+ }
+ }
+
+ return allErrs
+}
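+
+// Illustrative sketch of the cross-field comparisons above: they rely on
+// resource.Quantity.Cmp, which returns a positive value when the receiver is the
+// larger quantity. The quantities below are hypothetical.
+//
+//	min := resource.MustParse("200m")
+//	max := resource.MustParse("100m")
+//	if min.Cmp(max) > 0 {
+//		// reported as "min value 200m is greater than max value 100m"
+//	}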
+
+// ValidateServiceAccount tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList {
+ allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata"))
+ return allErrs
+}
+
+// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...)
+ return allErrs
+}
+
+// ValidateSecret tests if required fields in the Secret are set.
+func ValidateSecret(secret *api.Secret) field.ErrorList {
+ allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata"))
+
+ dataPath := field.NewPath("data")
+ totalSize := 0
+ for key, value := range secret.Data {
+ for _, msg := range validation.IsConfigMapKey(key) {
+ allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg))
+ }
+ totalSize += len(value)
+ }
+ if totalSize > api.MaxSecretSize {
+ allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize))
+ }
+
+ switch secret.Type {
+ case api.SecretTypeServiceAccountToken:
+ // Only require Annotations[kubernetes.io/service-account.name]
+ // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop
+ if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), ""))
+ }
+ case api.SecretTypeOpaque, "":
+ // no-op
+ case api.SecretTypeDockercfg:
+ dockercfgBytes, exists := secret.Data[api.DockerConfigKey]
+ if !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), ""))
+ break
+ }
+
+ // make sure that the content is well-formed json.
+ if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil {
+ allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "<secret contents redacted>", err.Error()))
+ }
+ case api.SecretTypeDockerConfigJson:
+ dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey]
+ if !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), ""))
+ break
+ }
+
+ // make sure that the content is well-formed json.
+ if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil {
+ allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "<secret contents redacted>", err.Error()))
+ }
+ case api.SecretTypeBasicAuth:
+ _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey]
+ _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey]
+
+ // username or password might be empty, but the field must be present
+ if !usernameFieldExists && !passwordFieldExists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.BasicAuthUsernameKey), ""))
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.BasicAuthPasswordKey), ""))
+ break
+ }
+ case api.SecretTypeSSHAuth:
+ if len(secret.Data[api.SSHAuthPrivateKey]) == 0 {
+			allErrs = append(allErrs, field.Required(dataPath.Key(api.SSHAuthPrivateKey), ""))
+ break
+ }
+
+ case api.SecretTypeTLS:
+ if _, exists := secret.Data[api.TLSCertKey]; !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), ""))
+ }
+ if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists {
+ allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), ""))
+ }
+ // TODO: Verify that the key matches the cert.
+ default:
+ // no-op
+ }
+
+ return allErrs
+}
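+
+// Illustrative usage sketch: a dockercfg secret must carry well-formed JSON under
+// api.DockerConfigKey; the metadata and payload below are hypothetical.
+//
+//	secret := &api.Secret{
+//		ObjectMeta: api.ObjectMeta{Name: "regcred", Namespace: "default"},
+//		Type:       api.SecretTypeDockercfg,
+//		Data: map[string][]byte{
+//			api.DockerConfigKey: []byte(`{"https://index.docker.io/v1/": {"auth": "..."}}`),
+//		},
+//	}
+//	errs := ValidateSecret(secret)
+//	// expect no errors; omitting the key or storing invalid JSON adds one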
+
+// ValidateSecretUpdate tests if required fields in the Secret are set.
+func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata"))
+
+ if len(newSecret.Type) == 0 {
+ newSecret.Type = oldSecret.Type
+ }
+
+ allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
+
+ allErrs = append(allErrs, ValidateSecret(newSecret)...)
+ return allErrs
+}
+
+// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateConfigMapName = NameIsDNSSubdomain
+
+// ValidateConfigMap tests whether required fields in the ConfigMap are set.
+func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...)
+
+ totalSize := 0
+
+ for key, value := range cfg.Data {
+ for _, msg := range validation.IsConfigMapKey(key) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
+ }
+ totalSize += len(value)
+ }
+ if totalSize > api.MaxSecretSize {
+ allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize))
+ }
+
+ return allErrs
+}
+
+// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set.
+func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...)
+ allErrs = append(allErrs, ValidateConfigMap(newCfg)...)
+
+ return allErrs
+}
+
+func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList {
+ if quantity.Value() < 0 {
+ return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")}
+ }
+ return field.ErrorList{}
+}
+
+// Validates resource requirement spec.
+func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ limPath := fldPath.Child("limits")
+ reqPath := fldPath.Child("requests")
+ for resourceName, quantity := range requirements.Limits {
+ fldPath := limPath.Key(string(resourceName))
+ // Validate resource name.
+ allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
+ if api.IsStandardResourceName(string(resourceName)) {
+ allErrs = append(allErrs, validateBasicResource(quantity, fldPath.Key(string(resourceName)))...)
+ }
+ // Check that request <= limit.
+ requestQuantity, exists := requirements.Requests[resourceName]
+ if exists {
+ // For GPUs, require that no request be set.
+ if resourceName == api.ResourceNvidiaGPU {
+ allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), "cannot be set"))
+ } else if quantity.Cmp(requestQuantity) < 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), "must be greater than or equal to request"))
+ }
+ }
+ }
+ for resourceName, quantity := range requirements.Requests {
+ fldPath := reqPath.Key(string(resourceName))
+ // Validate resource name.
+ allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
+ if api.IsStandardResourceName(string(resourceName)) {
+ allErrs = append(allErrs, validateBasicResource(quantity, fldPath.Key(string(resourceName)))...)
+ }
+ }
+ return allErrs
+}
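+
+// Illustrative usage sketch: a request larger than its limit is rejected by the
+// Cmp check above. The quantities are hypothetical; api.ResourceCPU is the
+// conventional key for cpu.
+//
+//	reqs := &api.ResourceRequirements{
+//		Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
+//		Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("1")},
+//	}
+//	errs := ValidateResourceRequirements(reqs, field.NewPath("resources"))
+//	// expect an Invalid error: "must be greater than or equal to request"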
+
+// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for the set of scopes
+func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(resourceQuotaSpec.Scopes) == 0 {
+ return allErrs
+ }
+ hardLimits := sets.NewString()
+ for k := range resourceQuotaSpec.Hard {
+ hardLimits.Insert(string(k))
+ }
+ fldPath := fld.Child("scopes")
+ scopeSet := sets.NewString()
+ for _, scope := range resourceQuotaSpec.Scopes {
+ if !api.IsStandardResourceQuotaScope(string(scope)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope"))
+ }
+ for _, k := range hardLimits.List() {
+ if api.IsStandardQuotaResourceName(k) && !api.IsResourceQuotaScopeValidForResource(scope, k) {
+ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource"))
+ }
+ }
+ scopeSet.Insert(string(scope))
+ }
+ invalidScopePairs := []sets.String{
+ sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)),
+ sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)),
+ }
+ for _, invalidScopePair := range invalidScopePairs {
+ if scopeSet.HasAll(invalidScopePair.List()...) {
+ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
+ }
+ }
+ return allErrs
+}
+
+// ValidateResourceQuota tests if required fields in the ResourceQuota are set.
+func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList {
+ allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata"))
+
+ allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...)
+ allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...)
+
+ return allErrs
+}
+
+func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ fldPath := fld.Child("hard")
+ for k, v := range status.Hard {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ fldPath = fld.Child("used")
+ for k, v := range status.Used {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+
+ return allErrs
+}
+
+func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ fldPath := fld.Child("hard")
+ for k, v := range resourceQuotaSpec.Hard {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...)
+
+ return allErrs
+}
+
+// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource
+func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...)
+ if api.IsIntegerResourceName(resource) {
+ if value.MilliValue()%int64(1000) != int64(0) {
+ allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg))
+ }
+ }
+ return allErrs
+}
+
+// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make.
+// newResourceQuota is updated with fields that cannot be changed.
+func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...)
+
+ // ensure scopes cannot change, and that resources are still valid for scope
+ fldPath := field.NewPath("spec", "scopes")
+ oldScopes := sets.NewString()
+ newScopes := sets.NewString()
+ for _, scope := range newResourceQuota.Spec.Scopes {
+ newScopes.Insert(string(scope))
+ }
+ for _, scope := range oldResourceQuota.Spec.Scopes {
+ oldScopes.Insert(string(scope))
+ }
+ if !oldScopes.Equal(newScopes) {
+ allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, "field is immutable"))
+ }
+
+ newResourceQuota.Status = oldResourceQuota.Status
+ return allErrs
+}
+
+// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make.
+// newResourceQuota is updated with fields that cannot be changed.
+func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
+ if len(newResourceQuota.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ fldPath := field.NewPath("status", "hard")
+ for k, v := range newResourceQuota.Status.Hard {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ fldPath = field.NewPath("status", "used")
+ for k, v := range newResourceQuota.Status.Used {
+ resPath := fldPath.Key(string(k))
+ allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...)
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+ }
+ newResourceQuota.Spec = oldResourceQuota.Spec
+ return allErrs
+}
+
+// ValidateNamespace tests if required fields are set.
+func ValidateNamespace(namespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata"))
+ for i := range namespace.Spec.Finalizers {
+ allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...)
+ }
+ return allErrs
+}
+
+// Validate finalizer names
+func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(stringValue) {
+ allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))
+ }
+ if len(allErrs) != 0 {
+ return allErrs
+ }
+
+ if len(strings.Split(stringValue, "/")) == 1 {
+ if !api.IsStandardFinalizerName(stringValue) {
+			return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified"))
+ }
+ }
+
+ return field.ErrorList{}
+}
+
+// ValidateNamespaceUpdate tests to make sure a namespace update can be applied.
+// newNamespace is updated with fields that cannot be changed
+func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
+ newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers
+ newNamespace.Status = oldNamespace.Status
+ return allErrs
+}
+
+// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields
+// that cannot be changed.
+func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
+ newNamespace.Spec = oldNamespace.Spec
+ if newNamespace.DeletionTimestamp.IsZero() {
+ if newNamespace.Status.Phase != api.NamespaceActive {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty"))
+ }
+ } else {
+ if newNamespace.Status.Phase != api.NamespaceTerminating {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty"))
+ }
+ }
+ return allErrs
+}
+
+// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make.
+// newNamespace is updated with fields that cannot be changed.
+func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
+
+ fldPath := field.NewPath("spec", "finalizers")
+ for i := range newNamespace.Spec.Finalizers {
+ idxPath := fldPath.Index(i)
+ allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...)
+ }
+ newNamespace.Status = oldNamespace.Status
+ return allErrs
+}
+
+// ValidateEndpoints tests if required fields are set.
+func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList {
+ allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata"))
+ allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...)
+ allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))...)
+ return allErrs
+}
+
+func validateEndpointSubsets(subsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for i := range subsets {
+ ss := &subsets[i]
+ idxPath := fldPath.Index(i)
+
+ if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 {
+ //TODO: consider adding a RequiredOneOf() error for this and similar cases
+ allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`"))
+ }
+ if len(ss.Ports) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("ports"), ""))
+ }
+ for addr := range ss.Addresses {
+ allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...)
+ }
+ for addr := range ss.NotReadyAddresses {
+ allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...)
+ }
+ for port := range ss.Ports {
+ allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...)
+ }
+ }
+
+ return allErrs
+}
+
+func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsValidIP(address.IP) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg))
+ }
+ if len(address.Hostname) > 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...)
+ }
+ if len(allErrs) > 0 {
+ return allErrs
+ }
+ allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
+ return allErrs
+}
+
+func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
+ // We disallow some IPs as endpoints or external-ips. Specifically,
+ // unspecified and loopback addresses are nonsensical and link-local
+ // addresses tend to be used for node-centric purposes (e.g. metadata
+ // service).
+ allErrs := field.ErrorList{}
+ ip := net.ParseIP(ipAddress)
+ if ip == nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address"))
+ return allErrs
+ }
+ if ip.IsUnspecified() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)"))
+ }
+ if ip.IsLoopback() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)"))
+ }
+ if ip.IsLinkLocalUnicast() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)"))
+ }
+ if ip.IsLinkLocalMulticast() {
+ allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)"))
+ }
+ return allErrs
+}
+
+func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if requireName && len(port.Name) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+ } else if len(port.Name) != 0 {
+ allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...)
+ }
+ for _, msg := range validation.IsValidPortNum(int(port.Port)) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg))
+ }
+ if len(port.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
+ } else if !supportedPortProtocols.Has(string(port.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
+ }
+ return allErrs
+}
+
+// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied.
+func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata"))
+ allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, field.NewPath("subsets"))...)
+ allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...)
+ return allErrs
+}
+
+// ValidateSecurityContext ensures the security context contains valid settings.
+func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // this should only be true for testing since SecurityContext is defaulted by the API
+ if sc == nil {
+ return allErrs
+ }
+
+ if sc.Privileged != nil {
+ if *sc.Privileged && !capabilities.Get().AllowPrivileged {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by policy"))
+ }
+ }
+
+ if sc.RunAsUser != nil {
+ if *sc.RunAsUser < 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg))
+ }
+ }
+ return allErrs
+}
+
+func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if opts.TailLines != nil && *opts.TailLines < 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg))
+ }
+ if opts.LimitBytes != nil && *opts.LimitBytes < 1 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0"))
+ }
+ switch {
+ case opts.SinceSeconds != nil && opts.SinceTime != nil:
+ allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified"))
+ case opts.SinceSeconds != nil:
+ if *opts.SinceSeconds < 1 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0"))
+ }
+ }
+ return allErrs
+}
+
+// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus
+func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for i, ingress := range status.Ingress {
+ idxPath := fldPath.Child("ingress").Index(i)
+ if len(ingress.IP) > 0 {
+ if isIP := (net.ParseIP(ingress.IP) != nil); !isIP {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address"))
+ }
+ }
+ if len(ingress.Hostname) > 0 {
+ for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
+ }
+ if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
+ }
+ }
+ }
+ return allErrs
+}
+
+// TODO: remove this after we EOL the annotation that carries it.
+func isValidHostnamesMap(serializedPodHostNames string) bool {
+ if len(serializedPodHostNames) == 0 {
+ return false
+ }
+ podHostNames := map[string]endpoints.HostRecord{}
+ err := json.Unmarshal([]byte(serializedPodHostNames), &podHostNames)
+ if err != nil {
+ return false
+ }
+
+ for ip, hostRecord := range podHostNames {
+ if len(validation.IsDNS1123Label(hostRecord.HostName)) != 0 {
+ return false
+ }
+ if net.ParseIP(ip) == nil {
+ return false
+ }
+ }
+ return true
+}
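For orientation only: the address checks in validateNonSpecialIP above rely solely on the standard library's net.IP predicates. A minimal, self-contained sketch of the same checks (the function and sample values here are illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"net"
)

// checkEndpointIP mirrors the intent of validateNonSpecialIP above:
// reject unspecified, loopback and link-local addresses.
func checkEndpointIP(s string) error {
	ip := net.ParseIP(s)
	if ip == nil {
		return fmt.Errorf("%q is not a valid IP address", s)
	}
	switch {
	case ip.IsUnspecified():
		return fmt.Errorf("%q may not be unspecified", s)
	case ip.IsLoopback():
		return fmt.Errorf("%q may not be a loopback address", s)
	case ip.IsLinkLocalUnicast(), ip.IsLinkLocalMulticast():
		return fmt.Errorf("%q may not be a link-local address", s)
	}
	return nil
}

func main() {
	for _, s := range []string{"10.0.0.1", "127.0.0.1", "169.254.1.1", "bad"} {
		fmt.Println(s, "->", checkEndpointIP(s))
	}
}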
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go
new file mode 100644
index 0000000..ede22b3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package apimachinery contains the generic API machinery code that
+// is common to both server and clients.
+// This package should never import specific API objects.
+package apimachinery
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go
new file mode 100644
index 0000000..307f55d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go
@@ -0,0 +1,346 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package registered keeps track of API versions that can be registered and are enabled in api.Scheme.
+package registered
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+var (
+ // registeredVersions stores all API group versions for which RegisterVersions is called.
+ registeredVersions = map[unversioned.GroupVersion]struct{}{}
+
+ // thirdPartyGroupVersions are API versions which are dynamically
+ // registered (and unregistered) via API calls to the apiserver
+ thirdPartyGroupVersions []unversioned.GroupVersion
+
+ // enabledVersions represents all enabled API versions. It should be a
+ // subset of registeredVersions. Please call EnableVersions() to add
+ // enabled versions.
+ enabledVersions = map[unversioned.GroupVersion]struct{}{}
+
+ // map of group meta for all groups.
+ groupMetaMap = map[string]*apimachinery.GroupMeta{}
+
+ // envRequestedVersions represents the versions requested via the
+ // KUBE_API_VERSIONS environment variable. The install package of each group
+ // checks this list before adding its versions to the latest package and
+ // Scheme. This list is small and order matters, so it is represented as a slice.
+ envRequestedVersions = []unversioned.GroupVersion{}
+)
+
+func init() {
+ // Env var KUBE_API_VERSIONS is a comma separated list of API versions that
+ // should be registered in the scheme.
+ kubeAPIVersions := os.Getenv("KUBE_API_VERSIONS")
+ if len(kubeAPIVersions) != 0 {
+ for _, version := range strings.Split(kubeAPIVersions, ",") {
+ gv, err := unversioned.ParseGroupVersion(version)
+ if err != nil {
+ glog.Fatalf("invalid api version: %s in KUBE_API_VERSIONS: %s.",
+ version, os.Getenv("KUBE_API_VERSIONS"))
+ }
+ envRequestedVersions = append(envRequestedVersions, gv)
+ }
+ }
+}
+
+// RegisterVersions adds the given group versions to the list of registered group versions.
+func RegisterVersions(availableVersions []unversioned.GroupVersion) {
+ for _, v := range availableVersions {
+ registeredVersions[v] = struct{}{}
+ }
+}
+
+// RegisterGroup adds the given group to the list of registered groups.
+func RegisterGroup(groupMeta apimachinery.GroupMeta) error {
+ groupName := groupMeta.GroupVersion.Group
+ if _, found := groupMetaMap[groupName]; found {
+ return fmt.Errorf("group %v is already registered", groupMetaMap)
+ }
+ groupMetaMap[groupName] = &groupMeta
+ return nil
+}
+
+// EnableVersions adds the versions for the given group to the list of enabled versions.
+// Note that the caller should call RegisterGroup before calling this method.
+// The caller of this function is responsible for adding the versions to the scheme and RESTMapper.
+func EnableVersions(versions ...unversioned.GroupVersion) error {
+ var unregisteredVersions []unversioned.GroupVersion
+ for _, v := range versions {
+ if _, found := registeredVersions[v]; !found {
+ unregisteredVersions = append(unregisteredVersions, v)
+ }
+ enabledVersions[v] = struct{}{}
+ }
+ if len(unregisteredVersions) != 0 {
+ return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions)
+ }
+ return nil
+}
+
+// IsAllowedVersion returns whether the version is allowed by the KUBE_API_VERSIONS
+// environment variable. If the environment variable is empty, then it always
+// returns true.
+func IsAllowedVersion(v unversioned.GroupVersion) bool {
+ if len(envRequestedVersions) == 0 {
+ return true
+ }
+ for _, envGV := range envRequestedVersions {
+ if v == envGV {
+ return true
+ }
+ }
+ return false
+}
+
+// IsEnabledVersion returns whether a version is enabled.
+func IsEnabledVersion(v unversioned.GroupVersion) bool {
+ _, found := enabledVersions[v]
+ return found
+}
+
+// EnabledVersions returns all enabled versions. Groups are randomly ordered, but versions within groups
+// are in priority order from best to worst.
+func EnabledVersions() []unversioned.GroupVersion {
+ ret := []unversioned.GroupVersion{}
+ for _, groupMeta := range groupMetaMap {
+ ret = append(ret, groupMeta.GroupVersions...)
+ }
+ return ret
+}
+
+// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst
+func EnabledVersionsForGroup(group string) []unversioned.GroupVersion {
+ groupMeta, ok := groupMetaMap[group]
+ if !ok {
+ return []unversioned.GroupVersion{}
+ }
+
+ return append([]unversioned.GroupVersion{}, groupMeta.GroupVersions...)
+}
+
+// Group returns the metadata of a group if the group is registered, otherwise
+// an error is returned.
+func Group(group string) (*apimachinery.GroupMeta, error) {
+ groupMeta, found := groupMetaMap[group]
+ if !found {
+ return nil, fmt.Errorf("group %v has not been registered", group)
+ }
+ groupMetaCopy := *groupMeta
+ return &groupMetaCopy, nil
+}
+
+// IsRegistered takes a string and determines if it's one of the registered groups
+func IsRegistered(group string) bool {
+ _, found := groupMetaMap[group]
+ return found
+}
+
+// IsRegisteredVersion returns whether a version is registered.
+func IsRegisteredVersion(v unversioned.GroupVersion) bool {
+ _, found := registeredVersions[v]
+ return found
+}
+
+// RegisteredGroupVersions returns all registered group versions.
+func RegisteredGroupVersions() []unversioned.GroupVersion {
+ ret := []unversioned.GroupVersion{}
+ for groupVersion := range registeredVersions {
+ ret = append(ret, groupVersion)
+ }
+ return ret
+}
+
+// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version.
+func IsThirdPartyAPIGroupVersion(gv unversioned.GroupVersion) bool {
+ for ix := range thirdPartyGroupVersions {
+ if thirdPartyGroupVersions[ix] == gv {
+ return true
+ }
+ }
+ return false
+}
+
+// AddThirdPartyAPIGroupVersions sets the list of third party versions,
+// registers them in the API machinery and enables them.
+// Skips GroupVersions that are already registered.
+// Returns the list of GroupVersions that were skipped.
+func AddThirdPartyAPIGroupVersions(gvs ...unversioned.GroupVersion) []unversioned.GroupVersion {
+ filteredGVs := []unversioned.GroupVersion{}
+ skippedGVs := []unversioned.GroupVersion{}
+ for ix := range gvs {
+ if !IsRegisteredVersion(gvs[ix]) {
+ filteredGVs = append(filteredGVs, gvs[ix])
+ } else {
+ glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String())
+ skippedGVs = append(skippedGVs, gvs[ix])
+ }
+ }
+ if len(filteredGVs) == 0 {
+ return skippedGVs
+ }
+ RegisterVersions(filteredGVs)
+ EnableVersions(filteredGVs...)
+ next := make([]unversioned.GroupVersion, len(gvs))
+ for ix := range filteredGVs {
+ next[ix] = filteredGVs[ix]
+ }
+ thirdPartyGroupVersions = next
+
+ return skippedGVs
+}
+
+// TODO: This is an expedient function, because we don't check if a Group is
+// supported throughout the code base. We will abandon this function and
+// check the error returned by the Group() function instead.
+func GroupOrDie(group string) *apimachinery.GroupMeta {
+ groupMeta, found := groupMetaMap[group]
+ if !found {
+ if group == "" {
+ panic("The legacy v1 API is not registered.")
+ } else {
+ panic(fmt.Sprintf("Group %s is not registered.", group))
+ }
+ }
+ groupMetaCopy := *groupMeta
+ return &groupMetaCopy
+}
+
+// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
+// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR
+// 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
+// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
+// all other groups alphabetical.
+func RESTMapper(versionPatterns ...unversioned.GroupVersion) meta.RESTMapper {
+ unionMapper := meta.MultiRESTMapper{}
+ unionedGroups := sets.NewString()
+ for enabledVersion := range enabledVersions {
+ if !unionedGroups.Has(enabledVersion.Group) {
+ unionedGroups.Insert(enabledVersion.Group)
+ groupMeta := groupMetaMap[enabledVersion.Group]
+ unionMapper = append(unionMapper, groupMeta.RESTMapper)
+ }
+ }
+
+ if len(versionPatterns) != 0 {
+ resourcePriority := []unversioned.GroupVersionResource{}
+ kindPriority := []unversioned.GroupVersionKind{}
+ for _, versionPriority := range versionPatterns {
+ resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
+ kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
+ }
+
+ return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
+ }
+
+ if len(envRequestedVersions) != 0 {
+ resourcePriority := []unversioned.GroupVersionResource{}
+ kindPriority := []unversioned.GroupVersionKind{}
+
+ for _, versionPriority := range envRequestedVersions {
+ resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
+ kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
+ }
+
+ return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
+ }
+
+ prioritizedGroups := []string{"", "extensions", "metrics"}
+ resourcePriority, kindPriority := prioritiesForGroups(prioritizedGroups...)
+
+ prioritizedGroupsSet := sets.NewString(prioritizedGroups...)
+ remainingGroups := sets.String{}
+ for enabledVersion := range enabledVersions {
+ if !prioritizedGroupsSet.Has(enabledVersion.Group) {
+ remainingGroups.Insert(enabledVersion.Group)
+ }
+ }
+
+ remainingResourcePriority, remainingKindPriority := prioritiesForGroups(remainingGroups.List()...)
+ resourcePriority = append(resourcePriority, remainingResourcePriority...)
+ kindPriority = append(kindPriority, remainingKindPriority...)
+
+ return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
+}
+
+// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first,
+// then any non-preferred version of the group second.
+func prioritiesForGroups(groups ...string) ([]unversioned.GroupVersionResource, []unversioned.GroupVersionKind) {
+ resourcePriority := []unversioned.GroupVersionResource{}
+ kindPriority := []unversioned.GroupVersionKind{}
+
+ for _, group := range groups {
+ availableVersions := EnabledVersionsForGroup(group)
+ if len(availableVersions) > 0 {
+ resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource))
+ kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind))
+ }
+ }
+ for _, group := range groups {
+ resourcePriority = append(resourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource})
+ kindPriority = append(kindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind})
+ }
+
+ return resourcePriority, kindPriority
+}
+
+// AllPreferredGroupVersions returns the preferred versions of all registered
+// groups in the form of "group1/version1,group2/version2,..."
+func AllPreferredGroupVersions() string {
+ if len(groupMetaMap) == 0 {
+ return ""
+ }
+ var defaults []string
+ for _, groupMeta := range groupMetaMap {
+ defaults = append(defaults, groupMeta.GroupVersion.String())
+ }
+ sort.Strings(defaults)
+ return strings.Join(defaults, ",")
+}
+
+// ValidateEnvRequestedVersions returns a list of versions that are requested in
+// the KUBE_API_VERSIONS environment variable, but not enabled.
+func ValidateEnvRequestedVersions() []unversioned.GroupVersion {
+ var missingVersions []unversioned.GroupVersion
+ for _, v := range envRequestedVersions {
+ if _, found := enabledVersions[v]; !found {
+ missingVersions = append(missingVersions, v)
+ }
+ }
+ return missingVersions
+}
+
+// reset resets the registration state.
+// It should not be used by anything other than tests.
+func reset() {
+ registeredVersions = map[unversioned.GroupVersion]struct{}{}
+ enabledVersions = map[unversioned.GroupVersion]struct{}{}
+ groupMetaMap = map[string]*apimachinery.GroupMeta{}
+
+}
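A minimal sketch of how a caller might drive this package, assuming the vendored import paths shown in this diff; the group/version value is purely illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
)

func main() {
	// A hypothetical external version used only for illustration.
	gv := unversioned.GroupVersion{Group: "example.k8s.io", Version: "v1"}

	// Versions must be registered before they can be enabled.
	registered.RegisterVersions([]unversioned.GroupVersion{gv})
	if err := registered.EnableVersions(gv); err != nil {
		fmt.Println("enable failed:", err)
		return
	}

	fmt.Println("registered:", registered.IsRegisteredVersion(gv))
	fmt.Println("enabled:", registered.IsEnabledVersion(gv))
}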
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go
new file mode 100644
index 0000000..0e90cbb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apimachinery
+
+import (
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupMeta stores the metadata of a group.
+type GroupMeta struct {
+ // GroupVersion represents the preferred version of the group.
+ GroupVersion unversioned.GroupVersion
+
+ // GroupVersions is Group + all versions in that group.
+ GroupVersions []unversioned.GroupVersion
+
+ // Codec is the default codec for serializing output that should use
+ // the preferred version. Use this Codec when writing to
+ // disk, a data store that is not dynamically versioned, or in tests.
+ // This codec can decode any object that the schema is aware of.
+ Codec runtime.Codec
+
+ // SelfLinker can set or get the SelfLink field of all API types.
+ // TODO: when versioning changes, make this part of each API definition.
+ // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses
+ // to go through the InterfacesFor method below.
+ SelfLinker runtime.SelfLinker
+
+ // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known
+ // versions.
+ RESTMapper meta.RESTMapper
+
+ // InterfacesFor returns the default Codec and ResourceVersioner for a given version
+ // or an error if the version is not known.
+ InterfacesFor func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error)
+}
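As a rough usage sketch (not part of the vendored code), a consumer usually reaches a GroupMeta through the registered package above and then uses its Codec or RESTMapper; the group name and error handling here are illustrative and assume the corresponding install package has run:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apimachinery/registered"
)

func main() {
	// Look up the metadata for the legacy core group (""), if it is registered.
	groupMeta, err := registered.Group("")
	if err != nil {
		fmt.Println("group not registered:", err)
		return
	}

	// The preferred external version and the group's RESTMapper/Codec
	// are then available from the returned GroupMeta.
	fmt.Println("preferred version:", groupMeta.GroupVersion)
	_ = groupMeta.RESTMapper
	_ = groupMeta.Codec
}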
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go
new file mode 100644
index 0000000..bdeb0b1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go
@@ -0,0 +1,111 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package apps
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_apps_PetSet,
+ DeepCopy_apps_PetSetList,
+ DeepCopy_apps_PetSetSpec,
+ DeepCopy_apps_PetSetStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_apps_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_apps_PetSetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_apps_PetSetStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_apps_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PetSet, len(in))
+ for i := range in {
+ if err := DeepCopy_apps_PetSet(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_apps_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ if in.VolumeClaimTemplates != nil {
+ in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]api.PersistentVolumeClaim, len(in))
+ for i := range in {
+ if err := api.DeepCopy_api_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumeClaimTemplates = nil
+ }
+ out.ServiceName = in.ServiceName
+ return nil
+}
+
+func DeepCopy_apps_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error {
+ if in.ObservedGeneration != nil {
+ in, out := in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ObservedGeneration = nil
+ }
+ out.Replicas = in.Replicas
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
new file mode 100644
index 0000000..bca1ff4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package apps
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go
new file mode 100644
index 0000000..346740e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the apps API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/apps"
+ "k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/apps"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", apps.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString()
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version,
+// or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1alpha1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(apps.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ apps.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1alpha1.SchemeGroupVersion:
+ v1alpha1.AddToScheme(api.Scheme)
+ }
+ }
+}
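Install packages like this one are normally pulled in with a blank import so that their init() side effects register and enable the group; a hedged sketch, assuming the vendored import paths in this diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apimachinery/registered"
	"k8s.io/kubernetes/pkg/apis/apps"

	// Imported for side effects: its init() registers and enables the apps group.
	_ "k8s.io/kubernetes/pkg/apis/apps/install"
)

func main() {
	fmt.Println("apps group registered:", registered.IsRegistered(apps.GroupName))
	fmt.Println("enabled versions:", registered.EnabledVersionsForGroup(apps.GroupName))
}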
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
new file mode 100644
index 0000000..90acc12
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apps
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// GroupName is the group name used in this package.
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ // TODO: this will get cleaned up when the scheme types are fixed
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &PetSet{},
+ &PetSetList{},
+ &api.ListOptions{},
+ )
+}
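The Kind and Resource helpers above simply qualify plain names with the apps group; a small illustrative sketch (output shown in comments is indicative only):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/apps"
)

func main() {
	// Both return group-qualified identifiers for the internal "apps" group.
	fmt.Println(apps.Kind("PetSet"))      // e.g. PetSet.apps
	fmt.Println(apps.Resource("petsets")) // e.g. petsets.apps
}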
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go
new file mode 100644
index 0000000..be8a54a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go
@@ -0,0 +1,1634 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package apps
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_api "k8s.io/kubernetes/pkg/api"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_api.ObjectMeta
+ var v1 pkg4_resource.Quantity
+ var v2 pkg1_unversioned.TypeMeta
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *PetSet) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSet) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PetSetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PetSetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PetSetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PetSetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PetSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != 0
+ yyq2[1] = x.Selector != nil
+ yyq2[3] = len(x.VolumeClaimTemplates) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy10 := &x.Template
+ yy10.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Template
+ yy12.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.VolumeClaimTemplates == nil {
+ r.EncodeNil()
+ } else {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumeClaimTemplates == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv7 := &x.Template
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "volumeClaimTemplates":
+ if r.TryDecodeAsNil() {
+ x.VolumeClaimTemplates = nil
+ } else {
+ yyv8 := &x.VolumeClaimTemplates
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv8), d)
+ }
+ }
+ case "serviceName":
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv15 := &x.Template
+ yyv15.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeClaimTemplates = nil
+ } else {
+ yyv16 := &x.VolumeClaimTemplates
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv16), d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PetSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.ObservedGeneration
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.ObservedGeneration
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PetSetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePetSet(([]PetSet)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePetSet(([]PetSet)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePetSet((*[]PetSet)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePetSet((*[]PetSet)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceapi_PersistentVolumeClaim(v []pkg2_api.PersistentVolumeClaim, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceapi_PersistentVolumeClaim(v *[]pkg2_api.PersistentVolumeClaim, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg2_api.PersistentVolumeClaim{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg2_api.PersistentVolumeClaim, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg2_api.PersistentVolumeClaim, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, pkg2_api.PersistentVolumeClaim{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, pkg2_api.PersistentVolumeClaim{}) // var yyz1 pkg2_api.PersistentVolumeClaim
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg2_api.PersistentVolumeClaim{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePetSet(v []PetSet, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PetSet{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PetSet, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PetSet, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PetSet{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PetSet{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PetSet{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PetSet{}) // var yyz1 PetSet
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PetSet{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PetSet{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
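
The CodecEncodeSelf/CodecDecodeSelf methods generated above are not called directly; the ugorji codec runtime invokes them whenever a value of these types passes through an Encoder or Decoder, because the types satisfy the library's Selfer interface. As a rough, hedged illustration only (using the upstream github.com/ugorji/go/codec import instead of the vendored codec1978 alias, and a stand-in struct in place of the vendored type), a round trip looks like this:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// Stand-in for the generated PetSetStatus; the real vendored type already
// implements the Selfer methods shown above, so the same calls apply to it.
type PetSetStatus struct {
	ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
	Replicas           int    `json:"replicas"`
}

func main() {
	gen := int64(3)
	in := PetSetStatus{ObservedGeneration: &gen, Replicas: 2}

	var h codec.JsonHandle // any Handle; Selfer methods are used when a type provides them
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(&in); err != nil {
		panic(err)
	}

	var out PetSetStatus
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %+v\n", buf, out)
}
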
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
new file mode 100644
index 0000000..c3e2897
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apps
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// PetSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The PetSet guarantees that a given network identity will always
+// map to the same storage identity. PetSet is currently in alpha
+// and subject to change without notice.
+type PetSet struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired identities of pets in this set.
+ Spec PetSetSpec `json:"spec,omitempty"`
+
+ // Status is the current status of Pets in this PetSet. This data
+ // may be out of date by some window of time.
+ Status PetSetStatus `json:"status,omitempty"`
+}
+
+// A PetSetSpec is the specification of a PetSet.
+type PetSetSpec struct {
+ // Replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ Replicas int `json:"replicas,omitempty"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // If empty, defaulted to labels on the pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the PetSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the PetSet.
+ Template api.PodTemplateSpec `json:"template"`
+
+ // VolumeClaimTemplates is a list of claims that pets are allowed to reference.
+ // The PetSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pet. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+	// any volumes in the template with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ VolumeClaimTemplates []api.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
+
+ // ServiceName is the name of the service that governs this PetSet.
+ // This service must exist before the PetSet, and is responsible for
+ // the network identity of the set. Pets get DNS/hostnames that follow the
+ // pattern: pet-specific-string.serviceName.default.svc.cluster.local
+ // where "pet-specific-string" is managed by the PetSet controller.
+ ServiceName string `json:"serviceName"`
+}
+
+// PetSetStatus represents the current state of a PetSet.
+type PetSetStatus struct {
+	// ObservedGeneration is the most recent generation observed by the PetSet controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+
+ // Replicas is the number of actual replicas.
+ Replicas int `json:"replicas"`
+}
+
+// PetSetList is a collection of PetSets.
+type PetSetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+ Items []PetSet `json:"items"`
+}
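
The comments above are the contract for these fields; to make the shape concrete, here is a minimal sketch of populating a PetSet with the internal API types (hypothetical names and labels, illustration only, not part of this change):

package apps_example // hypothetical package name

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/apps"
)

func examplePetSet() apps.PetSet {
	labels := map[string]string{"app": "db"} // hypothetical labels
	return apps.PetSet{
		ObjectMeta: api.ObjectMeta{Name: "db", Namespace: "default"},
		Spec: apps.PetSetSpec{
			Replicas: 3,
			Selector: &unversioned.LabelSelector{MatchLabels: labels},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: labels},
				Spec: api.PodSpec{
					Containers: []api.Container{{Name: "db", Image: "example/db:1.0"}},
				},
			},
			// Each claim name must match a volumeMount in the template.
			VolumeClaimTemplates: []api.PersistentVolumeClaim{
				{ObjectMeta: api.ObjectMeta{Name: "data"}},
			},
			ServiceName: "db", // the governing service must already exist
		},
	}
}
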
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go
new file mode 100644
index 0000000..b2fb1be
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/apis/apps"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions to handle the *int32 -> int
+ // conversion. A pointer is useful in the versioned type so we can default
+	// it, but a plain int is more convenient in the internal type. These
+ // functions are the same as the autogenerated ones in every other way.
+ err := scheme.AddConversionFuncs(
+ Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec,
+ Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec,
+ )
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+
+ err = api.Scheme.AddFieldLabelConversionFunc("apps/v1alpha1", "PetSet",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", "status.successful":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(in *PetSetSpec, out *apps.PetSetSpec, s conversion.Scope) error {
+ if in.Replicas != nil {
+ out.Replicas = int(*in.Replicas)
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := s.Convert(*in, *out, 0); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+ return err
+ }
+ if in.VolumeClaimTemplates != nil {
+ in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]api.PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumeClaimTemplates = nil
+ }
+ out.ServiceName = in.ServiceName
+ return nil
+}
+
+func Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(in *apps.PetSetSpec, out *PetSetSpec, s conversion.Scope) error {
+ out.Replicas = new(int32)
+ *out.Replicas = int32(in.Replicas)
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := s.Convert(*in, *out, 0); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := s.Convert(&in.Template, &out.Template, 0); err != nil {
+ return err
+ }
+ if in.VolumeClaimTemplates != nil {
+ in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]v1.PersistentVolumeClaim, len(*in))
+ for i := range *in {
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumeClaimTemplates = nil
+ }
+ out.ServiceName = in.ServiceName
+ return nil
+}
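
Stripped of the conversion.Scope machinery, the only hand-written concern in this file is the *int32 (versioned, defaultable) versus plain int (internal) Replicas field; the pattern reduces to the following sketch (hypothetical stand-in types, illustration only):

// versionedSpec and internalSpec are stand-ins for v1alpha1.PetSetSpec and apps.PetSetSpec.
type versionedSpec struct{ Replicas *int32 }
type internalSpec struct{ Replicas int }

func toInternal(in versionedSpec, out *internalSpec) {
	if in.Replicas != nil { // nil means "unset"; defaulting fills it in before conversion
		out.Replicas = int(*in.Replicas)
	}
}

func toVersioned(in internalSpec, out *versionedSpec) {
	out.Replicas = new(int32) // the internal value is always concrete, so always set the pointer
	*out.Replicas = int32(in.Replicas)
}
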
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go
new file mode 100644
index 0000000..3cf4728
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go
@@ -0,0 +1,156 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ apps "k8s.io/kubernetes/pkg/apis/apps"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1alpha1_PetSet_To_apps_PetSet,
+ Convert_apps_PetSet_To_v1alpha1_PetSet,
+ Convert_v1alpha1_PetSetList_To_apps_PetSetList,
+ Convert_apps_PetSetList_To_v1alpha1_PetSetList,
+ Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec,
+ Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec,
+ Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus,
+ Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error {
+ SetDefaults_PetSet(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PetSet_To_apps_PetSet(in, out, s)
+}
+
+func autoConvert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error {
+ return autoConvert_apps_PetSet_To_v1alpha1_PetSet(in, out, s)
+}
+
+func autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]apps.PetSet, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_PetSet_To_apps_PetSet(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in, out, s)
+}
+
+func autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PetSet, len(*in))
+ for i := range *in {
+ if err := Convert_apps_PetSet_To_v1alpha1_PetSet(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error {
+ return autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in, out, s)
+}
+
+func autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Replicas = int(in.Replicas)
+ return nil
+}
+
+func Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in, out, s)
+}
+
+func autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Replicas = int32(in.Replicas)
+ return nil
+}
+
+func Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error {
+ return autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in, out, s)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go
new file mode 100644
index 0000000..8718942
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go
@@ -0,0 +1,118 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1alpha1_PetSet,
+ DeepCopy_v1alpha1_PetSetList,
+ DeepCopy_v1alpha1_PetSetSpec,
+ DeepCopy_v1alpha1_PetSetStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1alpha1_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1alpha1_PetSetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1alpha1_PetSetStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PetSet, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_PetSet(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error {
+ if in.Replicas != nil {
+ in, out := in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Replicas = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ if in.VolumeClaimTemplates != nil {
+ in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates
+ *out = make([]v1.PersistentVolumeClaim, len(in))
+ for i := range in {
+ if err := v1.DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.VolumeClaimTemplates = nil
+ }
+ out.ServiceName = in.ServiceName
+ return nil
+}
+
+func DeepCopy_v1alpha1_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error {
+ if in.ObservedGeneration != nil {
+ in, out := in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ObservedGeneration = nil
+ }
+ out.Replicas = in.Replicas
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go
new file mode 100644
index 0000000..e578e8c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_PetSet,
+ )
+}
+
+func SetDefaults_PetSet(obj *PetSet) {
+ labels := obj.Spec.Template.Labels
+ if labels != nil {
+ if obj.Spec.Selector == nil {
+ obj.Spec.Selector = &unversioned.LabelSelector{
+ MatchLabels: labels,
+ }
+ }
+ if len(obj.Labels) == 0 {
+ obj.Labels = labels
+ }
+ }
+ if obj.Spec.Replicas == nil {
+ obj.Spec.Replicas = new(int32)
+ *obj.Spec.Replicas = 1
+ }
+}
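
Taken together, SetDefaults_PetSet copies the pod template labels onto the selector and onto the object itself when those are empty, and sets Replicas to 1 when it is nil. A hedged usage sketch (assuming the vendored package resolves under its k8s.io/kubernetes/pkg/apis/apps/v1alpha1 import path; the label value is made up):

package main // illustration only

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
)

func main() {
	ps := &v1alpha1.PetSet{}
	ps.Spec.Template.Labels = map[string]string{"app": "db"} // hypothetical label

	v1alpha1.SetDefaults_PetSet(ps)

	fmt.Println(ps.Spec.Selector.MatchLabels["app"]) // "db", copied from the template
	fmt.Println(ps.Labels["app"])                    // "db", copied onto the object
	fmt.Println(*ps.Spec.Replicas)                   // 1
}
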
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go
new file mode 100644
index 0000000..53d9fca
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps
+
+package v1alpha1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go
new file mode 100644
index 0000000..7e76067
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go
@@ -0,0 +1,969 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1alpha1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto
+
+ It has these top-level messages:
+ PetSet
+ PetSetList
+ PetSetSpec
+ PetSetStatus
+*/
+package v1alpha1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *PetSet) Reset() { *m = PetSet{} }
+func (m *PetSet) String() string { return proto.CompactTextString(m) }
+func (*PetSet) ProtoMessage() {}
+
+func (m *PetSetList) Reset() { *m = PetSetList{} }
+func (m *PetSetList) String() string { return proto.CompactTextString(m) }
+func (*PetSetList) ProtoMessage() {}
+
+func (m *PetSetSpec) Reset() { *m = PetSetSpec{} }
+func (m *PetSetSpec) String() string { return proto.CompactTextString(m) }
+func (*PetSetSpec) ProtoMessage() {}
+
+func (m *PetSetStatus) Reset() { *m = PetSetStatus{} }
+func (m *PetSetStatus) String() string { return proto.CompactTextString(m) }
+func (*PetSetStatus) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*PetSet)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSet")
+ proto.RegisterType((*PetSetList)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetList")
+ proto.RegisterType((*PetSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetSpec")
+ proto.RegisterType((*PetSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetStatus")
+}
+func (m *PetSet) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PetSet) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *PetSetList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PetSetList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n4, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PetSetSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PetSetSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n5, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n6, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ if len(m.VolumeClaimTemplates) > 0 {
+ for _, msg := range m.VolumeClaimTemplates {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName)))
+ i += copy(data[i:], m.ServiceName)
+ return i, nil
+}
+
+func (m *PetSetStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PetSetStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ObservedGeneration != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration))
+ }
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *PetSet) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PetSetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PetSetSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.VolumeClaimTemplates) > 0 {
+ for _, e := range m.VolumeClaimTemplates {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ServiceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PetSetStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.ObservedGeneration != nil {
+ n += 1 + sovGenerated(uint64(*m.ObservedGeneration))
+ }
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PetSet) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PetSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PetSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PetSetList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PetSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PetSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PetSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PetSetSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PetSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PetSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{})
+ if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PetSetStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PetSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PetSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ObservedGeneration = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
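
Note on the decoding loops above: Unmarshal and skipGenerated repeat the same base-128 varint pattern — each byte contributes 7 low-order bits and a set high bit means another byte follows; the decoded tag is then split into a field number (upper bits) and a wire type (low 3 bits). A minimal, self-contained sketch of that loop using only the standard library; decodeVarint and the sample bytes are illustrative and not part of the vendored package:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop used by the generated code: 7 payload bits
// per byte, a set high bit means "more bytes follow".
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
	return 0, 0, errors.New("integer overflow")
}

func main() {
	data := []byte{0x10, 0x03} // tag for field 2 with wire type 0 (varint), then the value 3
	tag, n, _ := decodeVarint(data)
	fieldNum, wireType := int32(tag>>3), int(tag&0x7)
	value, _, _ := decodeVarint(data[n:])
	fmt.Println(fieldNum, wireType, value) // prints: 2 0 3
}
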
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto
new file mode 100644
index 0000000..92bdd6b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.apps.v1alpha1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// PetSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The PetSet guarantees that a given network identity will always
+// map to the same storage identity. PetSet is currently in alpha
+// and subject to change without notice.
+message PetSet {
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired identities of pets in this set.
+ optional PetSetSpec spec = 2;
+
+ // Status is the current status of Pets in this PetSet. This data
+ // may be out of date by some window of time.
+ optional PetSetStatus status = 3;
+}
+
+// PetSetList is a collection of PetSets.
+message PetSetList {
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ repeated PetSet items = 2;
+}
+
+// A PetSetSpec is the specification of a PetSet.
+message PetSetSpec {
+ // Replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ optional int32 replicas = 1;
+
+ // Selector is a label query over pods that should match the replica count.
+ // If empty, defaulted to labels on the pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the PetSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the PetSet.
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
+
+ // VolumeClaimTemplates is a list of claims that pets are allowed to reference.
+ // The PetSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pet. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
+
+ // ServiceName is the name of the service that governs this PetSet.
+ // This service must exist before the PetSet, and is responsible for
+ // the network identity of the set. Pets get DNS/hostnames that follow the
+ // pattern: pet-specific-string.serviceName.default.svc.cluster.local
+ // where "pet-specific-string" is managed by the PetSet controller.
+ optional string serviceName = 5;
+}
+
+// PetSetStatus represents the current state of a PetSet.
+message PetSetStatus {
+ // most recent generation observed by this PetSet.
+ optional int64 observedGeneration = 1;
+
+ // Replicas is the number of actual replicas.
+ optional int32 replicas = 2;
+}
+
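
To make the mapping to the generated Unmarshal above concrete: both PetSetStatus fields are varint-encoded, so observedGeneration=5 and replicas=3 serialize to the four bytes 0x08 0x05 0x10 0x03 (tag for field 1, value 5, tag for field 2, value 3). A sketch that feeds those bytes through the vendored type; the upstream import path is assumed to be used as published:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
)

func main() {
	// 0x08 = field 1 (observedGeneration), wire type 0; 0x05 = 5
	// 0x10 = field 2 (replicas), wire type 0;           0x03 = 3
	data := []byte{0x08, 0x05, 0x10, 0x03}

	var status v1alpha1.PetSetStatus
	if err := status.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(*status.ObservedGeneration, status.Replicas) // prints: 5 3
}
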
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go
new file mode 100644
index 0000000..9fd138c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &PetSet{},
+ &PetSetList{},
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ )
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
+
+func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
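
A short usage sketch for the registration hook above, assuming the caller builds its own runtime.Scheme (runtime.NewScheme and the import alias are illustrative; in-tree callers normally register against the global api.Scheme via the install package):

package main

import (
	"fmt"

	appsv1alpha1 "k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// AddToScheme wires the PetSet kinds plus their defaulting and conversion
	// funcs into the scheme so the codec machinery can recognize apps/v1alpha1.
	scheme := runtime.NewScheme()
	appsv1alpha1.AddToScheme(scheme)

	fmt.Println("registered:", appsv1alpha1.SchemeGroupVersion) // apps/v1alpha1
}
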
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go
new file mode 100644
index 0000000..d036392
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go
@@ -0,0 +1,1664 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1alpha1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg4_resource.Quantity
+ var v1 pkg1_unversioned.TypeMeta
+ var v2 pkg2_v1.ObjectMeta
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *PetSet) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSet) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PetSetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PetSetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PetSetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PetSetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PetSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != nil
+ yyq2[1] = x.Selector != nil
+ yyq2[3] = len(x.VolumeClaimTemplates) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Replicas
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Replicas
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy12 := &x.Template
+ yy12.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.Template
+ yy14.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.VolumeClaimTemplates == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.VolumeClaimTemplates == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv8 := &x.Template
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "volumeClaimTemplates":
+ if r.TryDecodeAsNil() {
+ x.VolumeClaimTemplates = nil
+ } else {
+ yyv9 := &x.VolumeClaimTemplates
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv9), d)
+ }
+ }
+ case "serviceName":
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv17 := &x.Template
+ yyv17.CodecDecodeSelf(d)
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeClaimTemplates = nil
+ } else {
+ yyv18 := &x.VolumeClaimTemplates
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else {
+ h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv18), d)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PetSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.ObservedGeneration
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.ObservedGeneration
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PetSetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePetSet(([]PetSet)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePetSet(([]PetSet)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PetSetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PetSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePetSet((*[]PetSet)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PetSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePetSet((*[]PetSet)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg2_v1.PersistentVolumeClaim, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg2_v1.PersistentVolumeClaim, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg2_v1.PersistentVolumeClaim{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg2_v1.PersistentVolumeClaim, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg2_v1.PersistentVolumeClaim, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, pkg2_v1.PersistentVolumeClaim{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, pkg2_v1.PersistentVolumeClaim{}) // var yyz1 pkg2_v1.PersistentVolumeClaim
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg2_v1.PersistentVolumeClaim{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePetSet(v []PetSet, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PetSet{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PetSet, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PetSet, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PetSet{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PetSet{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PetSet{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PetSet{}) // var yyz1 PetSet
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PetSet{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PetSet{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
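
The file above is the codecgen fast path for github.com/ugorji/go/codec: because the types implement CodecEncodeSelf/CodecDecodeSelf, an encoder from that library dispatches to the generated methods instead of falling back to reflection. A minimal sketch of triggering that path; the JSON handle and buffer plumbing are illustrative assumptions:

package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
)

func main() {
	ps := v1alpha1.PetSet{ObjectMeta: v1.ObjectMeta{Name: "web"}}

	var handle codec.JsonHandle
	var buf bytes.Buffer
	// *PetSet satisfies the codec Selfer interface, so this call runs the
	// generated CodecEncodeSelf above rather than reflection-based encoding.
	if err := codec.NewEncoder(&buf, &handle).Encode(&ps); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
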
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go
new file mode 100644
index 0000000..93a0c66
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+)
+
+// PetSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The PetSet guarantees that a given network identity will always
+// map to the same storage identity. PetSet is currently in alpha
+// and subject to change without notice.
+type PetSet struct {
+ unversioned.TypeMeta `json:",inline"`
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired identities of pets in this set.
+ Spec PetSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the current status of Pets in this PetSet. This data
+ // may be out of date by some window of time.
+ Status PetSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// A PetSetSpec is the specification of a PetSet.
+type PetSetSpec struct {
+ // Replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // If empty, defaulted to labels on the pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the PetSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the PetSet.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // VolumeClaimTemplates is a list of claims that pets are allowed to reference.
+ // The PetSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pet. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+ // ServiceName is the name of the service that governs this PetSet.
+ // This service must exist before the PetSet, and is responsible for
+ // the network identity of the set. Pets get DNS/hostnames that follow the
+ // pattern: pet-specific-string.serviceName.default.svc.cluster.local
+ // where "pet-specific-string" is managed by the PetSet controller.
+ ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+}
+
+// PetSetStatus represents the current state of a PetSet.
+type PetSetStatus struct {
+ // most recent generation observed by this PetSet.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // Replicas is the number of actual replicas.
+ Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
+}
+
+// PetSetList is a collection of PetSets.
+type PetSetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ Items []PetSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
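
For reference, a PetSet assembled directly against these types might look like the sketch below; the names, labels, and image are made-up values for illustration:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
)

func main() {
	replicas := int32(3) // Replicas is *int32, so it needs an addressable value

	ps := v1alpha1.PetSet{
		ObjectMeta: v1.ObjectMeta{Name: "web"},
		Spec: v1alpha1.PetSetSpec{
			Replicas: &replicas,
			// Governing service: pets get DNS names of the form
			// <pet-specific-string>.web.default.svc.cluster.local.
			ServiceName: "web",
			Template: v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"app": "web"}},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "nginx", Image: "nginx:1.10"}},
				},
			},
		},
	}
	fmt.Printf("%s: %d replicas behind service %q\n", ps.Name, *ps.Spec.Replicas, ps.Spec.ServiceName)
}
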
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..5191f12
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_PetSet = map[string]string{
+ "": "PetSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe PetSet guarantees that a given network identity will always map to the same storage identity. PetSet is currently in alpha and subject to change without notice.",
+ "spec": "Spec defines the desired identities of pets in this set.",
+ "status": "Status is the current status of Pets in this PetSet. This data may be out of date by some window of time.",
+}
+
+func (PetSet) SwaggerDoc() map[string]string {
+ return map_PetSet
+}
+
+var map_PetSetList = map[string]string{
+ "": "PetSetList is a collection of PetSets.",
+}
+
+func (PetSetList) SwaggerDoc() map[string]string {
+ return map_PetSetList
+}
+
+var map_PetSetSpec = map[string]string{
+ "": "A PetSetSpec is the specification of a PetSet.",
+ "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+ "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the PetSet will fulfill this Template, but have a unique identity from the rest of the PetSet.",
+ "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pets are allowed to reference. The PetSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pet. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+ "serviceName": "ServiceName is the name of the service that governs this PetSet. This service must exist before the PetSet, and is responsible for the network identity of the set. Pets get DNS/hostnames that follow the pattern: pet-specific-string.serviceName.default.svc.cluster.local where \"pet-specific-string\" is managed by the PetSet controller.",
+}
+
+func (PetSetSpec) SwaggerDoc() map[string]string {
+ return map_PetSetSpec
+}
+
+var map_PetSetStatus = map[string]string{
+ "": "PetSetStatus represents the current state of a PetSet.",
+ "observedGeneration": "most recent generation observed by this autoscaler.",
+ "replicas": "Replicas is the number of actual replicas.",
+}
+
+func (PetSetStatus) SwaggerDoc() map[string]string {
+ return map_PetSetStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
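
These maps are consumed by go-restful when generating Swagger docs, but they can also be read directly; a trivial sketch:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
)

func main() {
	// The "" key documents the type itself; the other keys match JSON field names.
	docs := v1alpha1.PetSetSpec{}.SwaggerDoc()
	fmt.Println(docs["serviceName"])
}
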
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go
new file mode 100644
index 0000000..6424875
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go
@@ -0,0 +1,86 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package authentication
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_authenticationk8sio_TokenReview,
+ DeepCopy_authenticationk8sio_TokenReviewSpec,
+ DeepCopy_authenticationk8sio_TokenReviewStatus,
+ DeepCopy_authenticationk8sio_UserInfo,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_authenticationk8sio_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Spec = in.Spec
+ if err := DeepCopy_authenticationk8sio_TokenReviewStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_authenticationk8sio_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error {
+ out.Token = in.Token
+ return nil
+}
+
+func DeepCopy_authenticationk8sio_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error {
+ out.Authenticated = in.Authenticated
+ if err := DeepCopy_authenticationk8sio_UserInfo(in.User, &out.User, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_authenticationk8sio_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error {
+ out.Username = in.Username
+ out.UID = in.UID
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Groups = nil
+ }
+ if in.Extra != nil {
+ in, out := in.Extra, &out.Extra
+ *out = make(map[string][]string)
+ for key, val := range in {
+ if newVal, err := c.DeepCopy(val); err != nil {
+ return err
+ } else {
+ (*out)[key] = newVal.([]string)
+ }
+ }
+ } else {
+ out.Extra = nil
+ }
+ return nil
+}
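
A usage sketch for the generated deep-copy helpers above — conversion.NewCloner and the sample values are assumptions for illustration (the UserInfo type itself is defined elsewhere in this API group):

package main

import (
	"fmt"

	authentication "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
	"k8s.io/kubernetes/pkg/conversion"
)

func main() {
	in := authentication.UserInfo{
		Username: "alice",
		Groups:   []string{"system:authenticated"},
	}

	var out authentication.UserInfo
	// The generated function copies the scalar fields and clones the Groups
	// slice, so mutating the copy leaves the original untouched.
	if err := authentication.DeepCopy_authenticationk8sio_UserInfo(in, &out, conversion.NewCloner()); err != nil {
		panic(err)
	}
	out.Groups[0] = "ops"
	fmt.Println(in.Groups[0], out.Groups[0]) // prints: system:authenticated ops
}
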
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/doc.go
new file mode 100644
index 0000000..66c5ae7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package authentication
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go
new file mode 100644
index 0000000..e021ea8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the authentication.k8s.io API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
+ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", authentication.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ authentication.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1beta1.SchemeGroupVersion:
+ v1beta1.AddToScheme(api.Scheme)
+ }
+ }
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ rootScoped := sets.NewString("TokenReview")
+ ignoredKinds := sets.NewString()
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1beta1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(authentication.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
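
As the package comment says, this install package is imported purely for its init() side effect: a blank import is enough to register the authentication group and enable its external version. A minimal sketch of that pattern, using only paths and identifiers that appear in the file above (whether v1beta1 actually ends up enabled still depends on the registered package's allowed-version configuration):

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apimachinery/registered"
        _ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install" // side effect: registers and enables the group
        "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1"
    )

    func main() {
        // install's init() has run by now, so the external version should be enabled.
        fmt.Println(registered.IsEnabledVersion(v1beta1.SchemeGroupVersion))
    }
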
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go
new file mode 100644
index 0000000..57394fc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authentication
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &TokenReview{},
+ )
+}
+
+func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
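
The Kind and Resource helpers above only qualify a bare name with this group. A minimal sketch of calling them, assuming the vendored import path of this package:

    package main

    import (
        "fmt"

        authentication "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
    )

    func main() {
        // Qualify a kind and a resource with the authentication.k8s.io group.
        gk := authentication.Kind("TokenReview")
        gr := authentication.Resource("tokenreviews")
        fmt.Printf("%s/%s %s/%s\n", gk.Group, gk.Kind, gr.Group, gr.Resource)
    }
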
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go
new file mode 100644
index 0000000..b666e8c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go
@@ -0,0 +1,1265 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package authentication
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ "reflect"
+ "runtime"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.TypeMeta
+ _ = v0
+ }
+}
+
+func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = TokenReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "Status":
+ if r.TryDecodeAsNil() {
+ x.Status = TokenReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = TokenReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = TokenReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Token))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Token"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Token))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Token":
+ if r.TryDecodeAsNil() {
+ x.Token = ""
+ } else {
+ x.Token = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Token = ""
+ } else {
+ x.Token = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Authenticated))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Authenticated"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Authenticated))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.User
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("User"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.User
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Authenticated":
+ if r.TryDecodeAsNil() {
+ x.Authenticated = false
+ } else {
+ x.Authenticated = bool(r.DecodeBool())
+ }
+ case "User":
+ if r.TryDecodeAsNil() {
+ x.User = UserInfo{}
+ } else {
+ yyv5 := &x.User
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Authenticated = false
+ } else {
+ x.Authenticated = bool(r.DecodeBool())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = UserInfo{}
+ } else {
+ yyv8 := &x.User
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Username"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("UID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Groups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Extra"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Username":
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ case "UID":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ case "Groups":
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv6 := &x.Groups
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ case "Extra":
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv8 := &x.Extra
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv8), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv13 := &x.Groups
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv13, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv15 := &x.Extra
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv15), d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyk1))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ z.F.EncSliceStringV(yyv1, false, e)
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40)
+ yyv1 = make(map[string][]string, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 string
+ var yymv1 []string
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv3, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyv1))
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 string
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go
new file mode 100644
index 0000000..20eac2b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authentication
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// TokenReview attempts to authenticate a token to a known user.
+type TokenReview struct {
+ unversioned.TypeMeta
+
+ // Spec holds information about the request being evaluated
+ Spec TokenReviewSpec
+
+ // Status is filled in by the server and indicates whether the request can be authenticated.
+ Status TokenReviewStatus
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+type TokenReviewSpec struct {
+ // Token is the opaque bearer token.
+ Token string
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+type TokenReviewStatus struct {
+ // Authenticated indicates that the token was associated with a known user.
+ Authenticated bool
+ // User is the UserInfo associated with the provided token.
+ User UserInfo
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+type UserInfo struct {
+ // The name that uniquely identifies this user among all active users.
+ Username string
+ // A unique value that identifies this user across time. If this user is
+ // deleted and another user by the same name is added, they will have
+ // different UIDs.
+ UID string
+ // The names of groups this user is a part of.
+ Groups []string
+ // Any additional information provided by the authenticator.
+ Extra map[string][]string
+}
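
These are the internal (unversioned) request/response types for token authentication: a caller fills in Spec, an authenticator fills in Status. A minimal sketch of building one in memory, using only the fields declared above and the vendored import path of this package:

    package main

    import (
        "fmt"

        authentication "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
    )

    func main() {
        // The request side carries only the opaque bearer token.
        review := authentication.TokenReview{
            Spec: authentication.TokenReviewSpec{Token: "opaque-bearer-token"},
        }
        // The response side is what an authenticator would set on success.
        review.Status = authentication.TokenReviewStatus{
            Authenticated: true,
            User: authentication.UserInfo{
                Username: "jane",
                UID:      "42",
                Groups:   []string{"system:authenticated"},
                Extra:    map[string][]string{"scopes": {"openid"}},
            },
        }
        fmt.Printf("authenticated=%v user=%s\n", review.Status.Authenticated, review.Status.User.Username)
    }
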
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go
new file mode 100644
index 0000000..e360ee8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions
+ err := scheme.AddConversionFuncs()
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go
new file mode 100644
index 0000000..d60db5e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go
@@ -0,0 +1,143 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ authentication_k8s_io "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview,
+ Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview,
+ Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec,
+ Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec,
+ Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus,
+ Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus,
+ Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo,
+ Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error {
+ return autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in, out, s)
+}
+
+func autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error {
+ return autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in, out, s)
+}
+
+func autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error {
+ out.Token = in.Token
+ return nil
+}
+
+func Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in, out, s)
+}
+
+func autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error {
+ out.Token = in.Token
+ return nil
+}
+
+func Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error {
+ return autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error {
+ out.Authenticated = in.Authenticated
+ if err := Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(&in.User, &out.User, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in, out, s)
+}
+
+func autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error {
+ out.Authenticated = in.Authenticated
+ if err := Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error {
+ return autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error {
+ out.Username = in.Username
+ out.UID = in.UID
+ out.Groups = in.Groups
+ out.Extra = in.Extra
+ return nil
+}
+
+func Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error {
+ return autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in, out, s)
+}
+
+func autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error {
+ out.Username = in.Username
+ out.UID = in.UID
+ out.Groups = in.Groups
+ out.Extra = in.Extra
+ return nil
+}
+
+func Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error {
+ return autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in, out, s)
+}
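
The exported Convert_* wrappers above just delegate to their autoConvert_* bodies. For a leaf type like UserInfo the generated body never touches the conversion.Scope argument, so it can be exercised directly; the nil scope below is an assumption that only holds for such scope-free generated functions:

    package main

    import (
        "fmt"

        authentication "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
        "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1"
    )

    func main() {
        in := v1beta1.UserInfo{Username: "jane", UID: "42", Groups: []string{"dev"}}
        var out authentication.UserInfo
        // Scope is ignored by this particular generated conversion, hence nil.
        if err := v1beta1.Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(&in, &out, nil); err != nil {
            panic(err)
        }
        fmt.Println(out.Username, out.Groups)
    }
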
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go
new file mode 100644
index 0000000..d3294bf
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go
@@ -0,0 +1,86 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1beta1_TokenReview,
+ DeepCopy_v1beta1_TokenReviewSpec,
+ DeepCopy_v1beta1_TokenReviewStatus,
+ DeepCopy_v1beta1_UserInfo,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1beta1_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Spec = in.Spec
+ if err := DeepCopy_v1beta1_TokenReviewStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error {
+ out.Token = in.Token
+ return nil
+}
+
+func DeepCopy_v1beta1_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error {
+ out.Authenticated = in.Authenticated
+ if err := DeepCopy_v1beta1_UserInfo(in.User, &out.User, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error {
+ out.Username = in.Username
+ out.UID = in.UID
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Groups = nil
+ }
+ if in.Extra != nil {
+ in, out := in.Extra, &out.Extra
+ *out = make(map[string][]string)
+ for key, val := range in {
+ if newVal, err := c.DeepCopy(val); err != nil {
+ return err
+ } else {
+ (*out)[key] = newVal.([]string)
+ }
+ }
+ } else {
+ out.Extra = nil
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go
new file mode 100644
index 0000000..83794ff
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go
new file mode 100644
index 0000000..25b76fd
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authentication.k8s.io
+
+package v1beta1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go
new file mode 100644
index 0000000..5117936
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &TokenReview{},
+ )
+}
+
+func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go
new file mode 100644
index 0000000..60eeab9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go
@@ -0,0 +1,1321 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1beta1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ "reflect"
+ "runtime"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.TypeMeta
+ _ = v0
+ }
+}
+
+func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = TokenReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = TokenReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = TokenReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = TokenReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Token != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Token))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("token"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Token))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "token":
+ if r.TryDecodeAsNil() {
+ x.Token = ""
+ } else {
+ x.Token = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Token = ""
+ } else {
+ x.Token = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Authenticated != false
+ yyq2[1] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Authenticated))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("authenticated"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Authenticated))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy7 := &x.User
+ yy7.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.User
+ yy9.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "authenticated":
+ if r.TryDecodeAsNil() {
+ x.Authenticated = false
+ } else {
+ x.Authenticated = bool(r.DecodeBool())
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = UserInfo{}
+ } else {
+ yyv5 := &x.User
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Authenticated = false
+ } else {
+ x.Authenticated = bool(r.DecodeBool())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = UserInfo{}
+ } else {
+ yyv8 := &x.User
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Username != ""
+ yyq2[1] = x.UID != ""
+ yyq2[2] = len(x.Groups) != 0
+ yyq2[3] = len(x.Extra) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("username"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("groups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("extra"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "username":
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ case "groups":
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv6 := &x.Groups
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ case "extra":
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv8 := &x.Extra
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv8), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv13 := &x.Groups
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv13, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv15 := &x.Extra
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv15), d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyk1))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ z.F.EncSliceStringV(yyv1, false, e)
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40)
+ yyv1 = make(map[string][]string, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 string
+ var yymv1 []string
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv3, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyv1))
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 string
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go
new file mode 100644
index 0000000..ee32206
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+type TokenReview struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Spec holds information about the request being evaluated
+ Spec TokenReviewSpec `json:"spec"`
+
+ // Status is filled in by the server and indicates whether the request can be authenticated.
+ Status TokenReviewStatus `json:"status,omitempty"`
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+type TokenReviewSpec struct {
+ // Token is the opaque bearer token.
+ Token string `json:"token,omitempty"`
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+type TokenReviewStatus struct {
+ // Authenticated indicates that the token was associated with a known user.
+ Authenticated bool `json:"authenticated,omitempty"`
+ // User is the UserInfo associated with the provided token.
+ User UserInfo `json:"user,omitempty"`
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+type UserInfo struct {
+ // The name that uniquely identifies this user among all active users.
+ Username string `json:"username,omitempty"`
+ // A unique value that identifies this user across time. If this user is
+ // deleted and another user by the same name is added, they will have
+ // different UIDs.
+ UID string `json:"uid,omitempty"`
+ // The names of groups this user is a part of.
+ Groups []string `json:"groups,omitempty"`
+ // Any additional information provided by the authenticator.
+ Extra map[string][]string `json:"extra,omitempty"`
+}
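The JSON tags above fully determine the wire shape of a TokenReview as exchanged between the kube-apiserver and a webhook authenticator. The following self-contained sketch is illustrative only: it mirrors those tags with local stand-in types instead of importing the vendored v1beta1 package, and the field values are invented.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the v1beta1 types above, reduced to their JSON tags.
type tokenReviewSpec struct {
	Token string `json:"token,omitempty"`
}

type userInfo struct {
	Username string              `json:"username,omitempty"`
	UID      string              `json:"uid,omitempty"`
	Groups   []string            `json:"groups,omitempty"`
	Extra    map[string][]string `json:"extra,omitempty"`
}

type tokenReviewStatus struct {
	Authenticated bool     `json:"authenticated,omitempty"`
	User          userInfo `json:"user,omitempty"`
}

type tokenReview struct {
	Kind       string            `json:"kind,omitempty"`
	APIVersion string            `json:"apiVersion,omitempty"`
	Spec       tokenReviewSpec   `json:"spec"`
	Status     tokenReviewStatus `json:"status,omitempty"`
}

func main() {
	review := tokenReview{
		Kind:       "TokenReview",
		APIVersion: "authentication.k8s.io/v1beta1",
		Spec:       tokenReviewSpec{Token: "opaque-bearer-token"},
		Status: tokenReviewStatus{
			Authenticated: true,
			User: userInfo{
				Username: "jane",
				UID:      "42",
				Groups:   []string{"system:authenticated"},
			},
		},
	}
	out, _ := json.MarshalIndent(review, "", "  ")
	fmt.Println(string(out)) // the request carries spec.token; the response fills status
}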
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go
new file mode 100644
index 0000000..5e92552
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go
@@ -0,0 +1,149 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package authorization
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_authorization_LocalSubjectAccessReview,
+ DeepCopy_authorization_NonResourceAttributes,
+ DeepCopy_authorization_ResourceAttributes,
+ DeepCopy_authorization_SelfSubjectAccessReview,
+ DeepCopy_authorization_SelfSubjectAccessReviewSpec,
+ DeepCopy_authorization_SubjectAccessReview,
+ DeepCopy_authorization_SubjectAccessReviewSpec,
+ DeepCopy_authorization_SubjectAccessReviewStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_authorization_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_authorization_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error {
+ out.Path = in.Path
+ out.Verb = in.Verb
+ return nil
+}
+
+func DeepCopy_authorization_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error {
+ out.Namespace = in.Namespace
+ out.Verb = in.Verb
+ out.Group = in.Group
+ out.Version = in.Version
+ out.Resource = in.Resource
+ out.Subresource = in.Subresource
+ out.Name = in.Name
+ return nil
+}
+
+func DeepCopy_authorization_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error {
+ if in.ResourceAttributes != nil {
+ in, out := in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(ResourceAttributes)
+ **out = *in
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(NonResourceAttributes)
+ **out = *in
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ return nil
+}
+
+func DeepCopy_authorization_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_authorization_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error {
+ if in.ResourceAttributes != nil {
+ in, out := in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(ResourceAttributes)
+ **out = *in
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(NonResourceAttributes)
+ **out = *in
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ out.User = in.User
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Groups = nil
+ }
+ if in.Extra != nil {
+ in, out := in.Extra, &out.Extra
+ *out = make(map[string][]string)
+ for key, val := range in {
+ if newVal, err := c.DeepCopy(val); err != nil {
+ return err
+ } else {
+ (*out)[key] = newVal.([]string)
+ }
+ }
+ } else {
+ out.Extra = nil
+ }
+ return nil
+}
+
+func DeepCopy_authorization_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error {
+ out.Allowed = in.Allowed
+ out.Reason = in.Reason
+ return nil
+}
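The generated deep-copy functions never alias pointer fields: a non-nil ResourceAttributes or NonResourceAttributes pointer gets a fresh allocation followed by a value copy, and slices and maps are likewise re-made. A minimal self-contained sketch of that pattern, using hypothetical stand-in types rather than the vendored ones:

package main

import "fmt"

type resourceAttributes struct {
	Namespace, Verb, Resource string
}

type accessReviewSpec struct {
	ResourceAttributes *resourceAttributes
}

// deepCopySpec mirrors the shape of DeepCopy_authorization_SubjectAccessReviewSpec:
// allocate new memory for the pointer field, then copy the value into it.
func deepCopySpec(in accessReviewSpec, out *accessReviewSpec) {
	if in.ResourceAttributes != nil {
		out.ResourceAttributes = new(resourceAttributes)
		*out.ResourceAttributes = *in.ResourceAttributes
	} else {
		out.ResourceAttributes = nil
	}
}

func main() {
	src := accessReviewSpec{ResourceAttributes: &resourceAttributes{Namespace: "default", Verb: "get", Resource: "pods"}}
	var dst accessReviewSpec
	deepCopySpec(src, &dst)
	dst.ResourceAttributes.Verb = "list" // mutating the copy...
	fmt.Println(src.ResourceAttributes.Verb, dst.ResourceAttributes.Verb) // ...leaves the original untouched: "get list"
}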
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go
new file mode 100644
index 0000000..9660e5e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package authorization
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go
new file mode 100644
index 0000000..7899815
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the authorization API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/authorization"
+ "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/authorization"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", authorization.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ authorization.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1beta1.SchemeGroupVersion:
+ v1beta1.AddToScheme(api.Scheme)
+ }
+ }
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ worstToBestGroupVersions := []unversioned.GroupVersion{}
+ for i := len(externalVersions) - 1; i >= 0; i-- {
+ worstToBestGroupVersions = append(worstToBestGroupVersions, externalVersions[i])
+ }
+
+ rootScoped := sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview")
+ ignoredKinds := sets.NewString()
+ return api.NewDefaultRESTMapper(worstToBestGroupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1beta1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(authorization.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
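Because install.go does all of its registration work in init(), callers normally import the package purely for its side effects. A sketch of that usage, assuming the vendored k8s.io/kubernetes packages resolve from this repository's vendor directory:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apimachinery/registered"

	// Blank import: init() registers and enables authorization.k8s.io/v1beta1.
	_ "k8s.io/kubernetes/pkg/apis/authorization/install"
)

func main() {
	// After the blank import, the group metadata is queryable.
	g, err := registered.Group("authorization.k8s.io")
	if err != nil {
		fmt.Println("group not registered:", err)
		return
	}
	fmt.Println("enabled versions:", g.GroupVersions)
}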
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go
new file mode 100644
index 0000000..b1cfa49
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authorization.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &SelfSubjectAccessReview{},
+ &SubjectAccessReview{},
+ &LocalSubjectAccessReview{},
+ )
+}
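Kind and Resource are thin wrappers around SchemeGroupVersion; the hedged sketch below shows the kind of qualified names they produce (same assumption about the vendored import path; the version string here is illustrative, register.go itself uses the internal version):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	gv := unversioned.GroupVersion{Group: "authorization.k8s.io", Version: "v1beta1"}
	// Mirrors Kind("SubjectAccessReview") and Resource("subjectaccessreviews") in register.go.
	gk := gv.WithKind("SubjectAccessReview").GroupKind()
	gr := gv.WithResource("subjectaccessreviews").GroupResource()
	fmt.Println(gk, gr)
}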
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go
new file mode 100644
index 0000000..a0811c8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go
@@ -0,0 +1,2570 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package authorization
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ "reflect"
+ "runtime"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.TypeMeta
+ _ = v0
+ }
+}
+
+func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "Status":
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = SelfSubjectAccessReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "Status":
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = SelfSubjectAccessReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "Status":
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 7
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Verb"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Group))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Group"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Group))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Version))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Version"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Version))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Resource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subresource))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Subresource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subresource))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ case "Verb":
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ case "Group":
+ if r.TryDecodeAsNil() {
+ x.Group = ""
+ } else {
+ x.Group = string(r.DecodeString())
+ }
+ case "Version":
+ if r.TryDecodeAsNil() {
+ x.Version = ""
+ } else {
+ x.Version = string(r.DecodeString())
+ }
+ case "Resource":
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ case "Subresource":
+ if r.TryDecodeAsNil() {
+ x.Subresource = ""
+ } else {
+ x.Subresource = string(r.DecodeString())
+ }
+ case "Name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Group = ""
+ } else {
+ x.Group = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Version = ""
+ } else {
+ x.Version = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subresource = ""
+ } else {
+ x.Subresource = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Verb"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "Verb":
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 5
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ResourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("NonResourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("User"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Groups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Extra"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ResourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ case "NonResourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ case "User":
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ case "Groups":
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv7 := &x.Groups
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv7, false, d)
+ }
+ }
+ case "Extra":
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv9 := &x.Extra
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv9), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv15 := &x.Groups
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv15, false, d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv17 := &x.Extra
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv17), d)
+ }
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ResourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("NonResourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ResourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ case "NonResourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Allowed))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Allowed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Allowed))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("Reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "Allowed":
+ if r.TryDecodeAsNil() {
+ x.Allowed = false
+ } else {
+ x.Allowed = bool(r.DecodeBool())
+ }
+ case "Reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Allowed = false
+ } else {
+ x.Allowed = bool(r.DecodeBool())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyk1))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ z.F.EncSliceStringV(yyv1, false, e)
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40)
+ yyv1 = make(map[string][]string, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 string
+ var yymv1 []string
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv3, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyv1))
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 string
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go
new file mode 100644
index 0000000..afa2c42
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorization
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a
+// spec.namespace means "in all namespaces".
+type SubjectAccessReview struct {
+ unversioned.TypeMeta
+
+ // Spec holds information about the request being evaluated
+ Spec SubjectAccessReviewSpec
+
+ // Status is filled in by the server and indicates whether the request is allowed or not
+ Status SubjectAccessReviewStatus
+}
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a
+// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
+// to check whether they can perform an action.
+type SelfSubjectAccessReview struct {
+ unversioned.TypeMeta
+
+ // Spec holds information about the request being evaluated.
+ Spec SelfSubjectAccessReviewSpec
+
+ // Status is filled in by the server and indicates whether the request is allowed or not
+ Status SubjectAccessReviewStatus
+}
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+type LocalSubjectAccessReview struct {
+ unversioned.TypeMeta
+
+ // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace
+ // you made the request against. If empty, it is defaulted.
+ Spec SubjectAccessReviewSpec
+
+ // Status is filled in by the server and indicates whether the request is allowed or not
+ Status SubjectAccessReviewStatus
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+type ResourceAttributes struct {
+ // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+ // "" (empty) is defaulted for LocalSubjectAccessReviews
+ // "" (empty) is empty for cluster-scoped resources
+ // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+ Namespace string
+ // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
+ Verb string
+ // Group is the API Group of the Resource. "*" means all.
+ Group string
+ // Version is the API Version of the Resource. "*" means all.
+ Version string
+ // Resource is one of the existing resource types. "*" means all.
+ Resource string
+ // Subresource is one of the existing resource types. "" means none.
+ Subresource string
+ // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+ Name string
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+type NonResourceAttributes struct {
+ // Path is the URL path of the request
+ Path string
+ // Verb is the standard HTTP verb
+ Verb string
+}
+
+// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set
+type SubjectAccessReviewSpec struct {
+ // ResourceAttributes describes information for a resource access request
+ ResourceAttributes *ResourceAttributes
+ // NonResourceAttributes describes information for a non-resource access request
+ NonResourceAttributes *NonResourceAttributes
+
+ // User is the user you're testing for.
+	// If you specify "User" but not "Groups", then it is interpreted as "What if User were not a member of any groups?".
+ User string
+ // Groups is the groups you're testing for.
+ Groups []string
+ // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer
+	// it needs to be reflected here.
+ Extra map[string][]string
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes
+// and NonResourceAttributes must be set
+type SelfSubjectAccessReviewSpec struct {
+ // ResourceAttributes describes information for a resource access request
+ ResourceAttributes *ResourceAttributes
+ // NonResourceAttributes describes information for a non-resource access request
+ NonResourceAttributes *NonResourceAttributes
+}
+
+// SubjectAccessReviewStatus reports the result of an access review: whether the request is allowed and, optionally, why.
+type SubjectAccessReviewStatus struct {
+ // Allowed is required. True if the action would be allowed, false otherwise.
+ Allowed bool
+ // Reason is optional. It indicates why a request was allowed or denied.
+ Reason string
+}
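The types above define the contract spelled out in their comments: a SubjectAccessReviewSpec carries exactly one of ResourceAttributes or NonResourceAttributes, plus the user and groups being tested, and the server fills in SubjectAccessReviewStatus. As a minimal sketch (not part of the vendored code; the import path simply matches the vendored location in this diff), a resource-scoped review built from these internal types could look like this:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/authorization"
)

func main() {
	// Can user "jane" list pods in namespace "default"? Only ResourceAttributes
	// is set; NonResourceAttributes stays nil, per the "exactly one of" rule on
	// SubjectAccessReviewSpec.
	review := authorization.SubjectAccessReview{
		Spec: authorization.SubjectAccessReviewSpec{
			ResourceAttributes: &authorization.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Group:     "", // core API group
				Version:   "v1",
				Resource:  "pods",
			},
			User:   "jane",
			Groups: []string{"system:authenticated"},
		},
	}

	// Status is normally populated by the API server after it evaluates the spec.
	fmt.Printf("allowed=%v reason=%q\n", review.Status.Allowed, review.Status.Reason)
}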
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go
new file mode 100644
index 0000000..cd03e61
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions
+ err := scheme.AddConversionFuncs()
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
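addConversionFuncs registers hand-written conversions with the scheme and panics on a registration error, so a malformed function is caught when the package loads rather than during a later conversion; here it registers nothing, since every conversion for this group is generated. As a hedged sketch (the function name and body are illustrative, not part of the vendored file), a hand-written conversion would be passed in with the same shape as the generated Convert_* helpers in conversion_generated.go:

package v1beta1

import (
	authorization "k8s.io/kubernetes/pkg/apis/authorization"
	conversion "k8s.io/kubernetes/pkg/conversion"
	"k8s.io/kubernetes/pkg/runtime"
)

// addConversionFuncsSketch shows the shape of a hand-written registration; the
// conversion body here just copies fields, exactly like the generated helper.
func addConversionFuncsSketch(scheme *runtime.Scheme) {
	err := scheme.AddConversionFuncs(
		func(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
			out.Path = in.Path
			out.Verb = in.Verb
			return nil
		},
	)
	if err != nil {
		// A function with an unexpected signature is rejected here, at
		// registration time, which is why the real code panics immediately.
		panic(err)
	}
}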
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go
new file mode 100644
index 0000000..18d1138
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go
@@ -0,0 +1,333 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ authorization "k8s.io/kubernetes/pkg/apis/authorization"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview,
+ Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview,
+ Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes,
+ Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes,
+ Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes,
+ Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes,
+ Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview,
+ Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview,
+ Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec,
+ Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec,
+ Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview,
+ Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview,
+ Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec,
+ Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec,
+ Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus,
+ Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error {
+ return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s)
+}
+
+func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error {
+ return autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s)
+}
+
+func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
+ out.Path = in.Path
+ out.Verb = in.Verb
+ return nil
+}
+
+func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
+ return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s)
+}
+
+func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error {
+ out.Path = in.Path
+ out.Verb = in.Verb
+ return nil
+}
+
+func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error {
+ return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s)
+}
+
+func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error {
+ out.Namespace = in.Namespace
+ out.Verb = in.Verb
+ out.Group = in.Group
+ out.Version = in.Version
+ out.Resource = in.Resource
+ out.Subresource = in.Subresource
+ out.Name = in.Name
+ return nil
+}
+
+func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error {
+ return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s)
+}
+
+func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error {
+ out.Namespace = in.Namespace
+ out.Verb = in.Verb
+ out.Group = in.Group
+ out.Version = in.Version
+ out.Resource = in.Resource
+ out.Subresource = in.Subresource
+ out.Name = in.Name
+ return nil
+}
+
+func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error {
+ return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s)
+}
+
+func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error {
+ return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s)
+}
+
+func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error {
+ return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s)
+}
+
+func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
+ if in.ResourceAttributes != nil {
+ in, out := &in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(authorization.ResourceAttributes)
+ if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(authorization.NonResourceAttributes)
+ if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s)
+}
+
+func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error {
+ if in.ResourceAttributes != nil {
+ in, out := &in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(ResourceAttributes)
+ if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(NonResourceAttributes)
+ if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ return nil
+}
+
+func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error {
+ return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error {
+ return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s)
+}
+
+func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error {
+ return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s)
+}
+
+func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error {
+ if in.ResourceAttributes != nil {
+ in, out := &in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(authorization.ResourceAttributes)
+ if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(authorization.NonResourceAttributes)
+ if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ out.User = in.User
+ out.Groups = in.Groups
+ out.Extra = in.Extra
+ return nil
+}
+
+func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s)
+}
+
+func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error {
+ if in.ResourceAttributes != nil {
+ in, out := &in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(ResourceAttributes)
+ if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(NonResourceAttributes)
+ if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ out.User = in.User
+ out.Groups = in.Groups
+ out.Extra = in.Extra
+ return nil
+}
+
+func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error {
+ return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error {
+ out.Allowed = in.Allowed
+ out.Reason = in.Reason
+ return nil
+}
+
+func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s)
+}
+
+func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error {
+ out.Allowed = in.Allowed
+ out.Reason = in.Reason
+ return nil
+}
+
+func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error {
+ return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go
new file mode 100644
index 0000000..23d2edf
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go
@@ -0,0 +1,149 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1beta1_LocalSubjectAccessReview,
+ DeepCopy_v1beta1_NonResourceAttributes,
+ DeepCopy_v1beta1_ResourceAttributes,
+ DeepCopy_v1beta1_SelfSubjectAccessReview,
+ DeepCopy_v1beta1_SelfSubjectAccessReviewSpec,
+ DeepCopy_v1beta1_SubjectAccessReview,
+ DeepCopy_v1beta1_SubjectAccessReviewSpec,
+ DeepCopy_v1beta1_SubjectAccessReviewStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1beta1_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1beta1_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error {
+ out.Path = in.Path
+ out.Verb = in.Verb
+ return nil
+}
+
+func DeepCopy_v1beta1_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error {
+ out.Namespace = in.Namespace
+ out.Verb = in.Verb
+ out.Group = in.Group
+ out.Version = in.Version
+ out.Resource = in.Resource
+ out.Subresource = in.Subresource
+ out.Name = in.Name
+ return nil
+}
+
+func DeepCopy_v1beta1_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error {
+ if in.ResourceAttributes != nil {
+ in, out := in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(ResourceAttributes)
+ **out = *in
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(NonResourceAttributes)
+ **out = *in
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1beta1_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error {
+ if in.ResourceAttributes != nil {
+ in, out := in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(ResourceAttributes)
+ **out = *in
+ } else {
+ out.ResourceAttributes = nil
+ }
+ if in.NonResourceAttributes != nil {
+ in, out := in.NonResourceAttributes, &out.NonResourceAttributes
+ *out = new(NonResourceAttributes)
+ **out = *in
+ } else {
+ out.NonResourceAttributes = nil
+ }
+ out.User = in.User
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Groups = nil
+ }
+ if in.Extra != nil {
+ in, out := in.Extra, &out.Extra
+ *out = make(map[string][]string)
+ for key, val := range in {
+ if newVal, err := c.DeepCopy(val); err != nil {
+ return err
+ } else {
+ (*out)[key] = newVal.([]string)
+ }
+ }
+ } else {
+ out.Extra = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error {
+ out.Allowed = in.Allowed
+ out.Reason = in.Reason
+ return nil
+}
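
The deep-copy helpers above can also be called directly: each takes the source by value, a destination pointer, and a *conversion.Cloner that is consulted only for fields without generated copy logic. An illustrative sketch, assuming conversion.NewCloner from the same vendor tree (not part of the vendored file):

package authzexample

import (
	authorizationv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
	conversion "k8s.io/kubernetes/pkg/conversion"
)

// copyAttributes returns an independent copy of a ResourceAttributes value
// via the generated deep-copy function; the cloner goes unused for this type
// because every field is a plain string.
func copyAttributes(in authorizationv1beta1.ResourceAttributes) (authorizationv1beta1.ResourceAttributes, error) {
	var out authorizationv1beta1.ResourceAttributes
	err := authorizationv1beta1.DeepCopy_v1beta1_ResourceAttributes(in, &out, conversion.NewCloner())
	return out, err
}
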
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go
new file mode 100644
index 0000000..f9dda99
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go
new file mode 100644
index 0000000..a6f2c27
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authorization
+
+package v1beta1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go
new file mode 100644
index 0000000..fe99bee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authorization.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &SelfSubjectAccessReview{},
+ &SubjectAccessReview{},
+ &LocalSubjectAccessReview{},
+ )
+}
+
+func (obj *LocalSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *SubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *SelfSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
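
AddToScheme above registers the group against whatever scheme it is handed, so the same kinds can be installed into a locally built scheme as well as the global api.Scheme. A hypothetical sketch (the local scheme is an assumption, not something this diff does):

package authzexample

import (
	authorizationv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
	"k8s.io/kubernetes/pkg/runtime"
)

// newAuthzScheme builds a fresh scheme and registers SubjectAccessReview,
// SelfSubjectAccessReview and LocalSubjectAccessReview under
// authorization.k8s.io/v1beta1, along with the package's defaulting and
// conversion hooks.
func newAuthzScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	authorizationv1beta1.AddToScheme(scheme)
	return scheme
}
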
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go
new file mode 100644
index 0000000..0bd74a1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go
@@ -0,0 +1,2710 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1beta1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ "reflect"
+ "runtime"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.TypeMeta
+ _ = v0
+ }
+}
+
+func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = SelfSubjectAccessReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = SelfSubjectAccessReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.Spec
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.Spec
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Status
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Status
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv4 := &x.Spec
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv5 := &x.Status
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = SubjectAccessReviewSpec{}
+ } else {
+ yyv9 := &x.Spec
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = SubjectAccessReviewStatus{}
+ } else {
+ yyv10 := &x.Status
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Namespace != ""
+ yyq2[1] = x.Verb != ""
+ yyq2[2] = x.Group != ""
+ yyq2[3] = x.Version != ""
+ yyq2[4] = x.Resource != ""
+ yyq2[5] = x.Subresource != ""
+ yyq2[6] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("verb"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Group))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("group"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Group))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Version))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("version"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Version))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Resource))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subresource))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subresource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subresource))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ case "verb":
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ case "group":
+ if r.TryDecodeAsNil() {
+ x.Group = ""
+ } else {
+ x.Group = string(r.DecodeString())
+ }
+ case "version":
+ if r.TryDecodeAsNil() {
+ x.Version = ""
+ } else {
+ x.Version = string(r.DecodeString())
+ }
+ case "resource":
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ case "subresource":
+ if r.TryDecodeAsNil() {
+ x.Subresource = ""
+ } else {
+ x.Subresource = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Group = ""
+ } else {
+ x.Group = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Version = ""
+ } else {
+ x.Version = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resource = ""
+ } else {
+ x.Resource = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subresource = ""
+ } else {
+ x.Subresource = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ yyq2[1] = x.Verb != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("verb"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Verb))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "verb":
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Verb = ""
+ } else {
+ x.Verb = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ResourceAttributes != nil
+ yyq2[1] = x.NonResourceAttributes != nil
+ yyq2[2] = x.User != ""
+ yyq2[3] = len(x.Groups) != 0
+ yyq2[4] = len(x.Extra) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.User))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("group"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("extra"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Extra == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encMapstringSlicestring((map[string][]string)(x.Extra), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "resourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ case "nonResourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ case "group":
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv7 := &x.Groups
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv7, false, d)
+ }
+ }
+ case "extra":
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv9 := &x.Extra
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv9), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.User = ""
+ } else {
+ x.User = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv15 := &x.Groups
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv15, false, d)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Extra = nil
+ } else {
+ yyv17 := &x.Extra
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ h.decMapstringSlicestring((*map[string][]string)(yyv17), d)
+ }
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ResourceAttributes != nil
+ yyq2[1] = x.NonResourceAttributes != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.ResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NonResourceAttributes == nil {
+ r.EncodeNil()
+ } else {
+ x.NonResourceAttributes.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "resourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ case "nonResourceAttributes":
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ResourceAttributes != nil {
+ x.ResourceAttributes = nil
+ }
+ } else {
+ if x.ResourceAttributes == nil {
+ x.ResourceAttributes = new(ResourceAttributes)
+ }
+ x.ResourceAttributes.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NonResourceAttributes != nil {
+ x.NonResourceAttributes = nil
+ }
+ } else {
+ if x.NonResourceAttributes == nil {
+ x.NonResourceAttributes = new(NonResourceAttributes)
+ }
+ x.NonResourceAttributes.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Reason != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Allowed))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allowed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Allowed))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "allowed":
+ if r.TryDecodeAsNil() {
+ x.Allowed = false
+ } else {
+ x.Allowed = bool(r.DecodeBool())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Allowed = false
+ } else {
+ x.Allowed = bool(r.DecodeBool())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk1, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyk1))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ z.F.EncSliceStringV(yyv1, false, e)
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyl1 := r.ReadMapStart()
+ yybh1 := z.DecBasicHandle()
+ if yyv1 == nil {
+ yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40)
+ yyv1 = make(map[string][]string, yyrl1)
+ *v = yyv1
+ }
+ var yymk1 string
+ var yymv1 []string
+ var yymg1 bool
+ if yybh1.MapValueReset {
+ yymg1 = true
+ }
+ if yyl1 > 0 {
+ for yyj1 := 0; yyj1 < yyl1; yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv3 := &yymv1
+ yym4 := z.DecBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv3, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } else if yyl1 < 0 {
+ for yyj1 := 0; !r.CheckBreak(); yyj1++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ if r.TryDecodeAsNil() {
+ yymk1 = ""
+ } else {
+ yymk1 = string(r.DecodeString())
+ }
+
+ if yymg1 {
+ yymv1 = yyv1[yymk1]
+ } else {
+ yymv1 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ if r.TryDecodeAsNil() {
+ yymv1 = nil
+ } else {
+ yyv6 := &yymv1
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+
+ if yyv1 != nil {
+ yyv1[yymk1] = yymv1
+ }
+ }
+ } // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym2 := z.EncBinary()
+ _ = yym2
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(yyv1))
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ } else {
+ yyv1 = make([]string, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 string
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = string(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []string{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
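
The generated CodecEncodeSelf/CodecDecodeSelf methods above are not normally called by hand; the ugorji codec runtime invokes them whenever a value of one of these types is passed to an Encoder or Decoder, because the types satisfy codec.Selfer. A minimal round-trip sketch, assuming the vendored github.com/ugorji/go/codec and k8s.io/kubernetes/pkg/apis/authorization/v1beta1 import paths resolve (the handle choice and field values are illustrative only):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
        authzv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
    )

    func main() {
        // Any codec handle works; the generated Selfer methods are picked up
        // automatically by the encoder and decoder.
        var jh codec.JsonHandle

        in := authzv1beta1.NonResourceAttributes{Path: "/healthz", Verb: "get"}

        var buf []byte
        if err := codec.NewEncoderBytes(&buf, &jh).Encode(&in); err != nil {
            panic(err)
        }

        var out authzv1beta1.NonResourceAttributes
        if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", out) // {Path:/healthz Verb:get}
    }
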
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go
new file mode 100644
index 0000000..70c1282
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+type SubjectAccessReview struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Spec holds information about the request being evaluated
+ Spec SubjectAccessReviewSpec `json:"spec"`
+
+ // Status is filled in by the server and indicates whether the request is allowed or not
+ Status SubjectAccessReviewStatus `json:"status,omitempty"`
+}
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a
+// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
+// to check whether they can perform an action.
+type SelfSubjectAccessReview struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Spec holds information about the request being evaluated. user and groups must be empty
+ Spec SelfSubjectAccessReviewSpec `json:"spec"`
+
+ // Status is filled in by the server and indicates whether the request is allowed or not
+ Status SubjectAccessReviewStatus `json:"status,omitempty"`
+}
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+type LocalSubjectAccessReview struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace
+ // you made the request against. If empty, it is defaulted.
+ Spec SubjectAccessReviewSpec `json:"spec"`
+
+ // Status is filled in by the server and indicates whether the request is allowed or not
+ Status SubjectAccessReviewStatus `json:"status,omitempty"`
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+type ResourceAttributes struct {
+ // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+ // "" (empty) is defaulted for LocalSubjectAccessReviews
+ // "" (empty) is empty for cluster-scoped resources
+ // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+ Namespace string `json:"namespace,omitempty"`
+ // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
+ Verb string `json:"verb,omitempty"`
+ // Group is the API Group of the Resource. "*" means all.
+ Group string `json:"group,omitempty"`
+ // Version is the API Version of the Resource. "*" means all.
+ Version string `json:"version,omitempty"`
+ // Resource is one of the existing resource types. "*" means all.
+ Resource string `json:"resource,omitempty"`
+ // Subresource is one of the existing resource types. "" means none.
+ Subresource string `json:"subresource,omitempty"`
+ // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+ Name string `json:"name,omitempty"`
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+type NonResourceAttributes struct {
+ // Path is the URL path of the request
+ Path string `json:"path,omitempty"`
+ // Verb is the standard HTTP verb
+ Verb string `json:"verb,omitempty"`
+}
+
+// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+type SubjectAccessReviewSpec struct {
+ // ResourceAuthorizationAttributes describes information for a resource access request
+ ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty"`
+ // NonResourceAttributes describes information for a non-resource access request
+ NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty"`
+
+ // User is the user you're testing for.
+ // If you specify "User" but not "Group", then it is interpreted as "What if User were not a member of any groups?".
+ User string `json:"user,omitempty"`
+ // Groups is the groups you're testing for.
+ Groups []string `json:"group,omitempty"`
+ // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer
+ // it needs a reflection here.
+ Extra map[string][]string `json:"extra,omitempty"`
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+type SelfSubjectAccessReviewSpec struct {
+ // ResourceAuthorizationAttributes describes information for a resource access request
+ ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty"`
+ // NonResourceAttributes describes information for a non-resource access request
+ NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty"`
+}
+
+// SubjectAccessReviewStatus
+type SubjectAccessReviewStatus struct {
+ // Allowed is required. True if the action would be allowed, false otherwise.
+ Allowed bool `json:"allowed"`
+ // Reason is optional. It indicates why a request was allowed or denied.
+ Reason string `json:"reason,omitempty"`
+}
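
The json tags above define the wire names that the generated codec in the previous file encodes and decodes. For orientation, a small self-contained sketch (all field values are made up) that builds a resource-scoped review; per the SubjectAccessReviewSpec comment, exactly one of ResourceAttributes and NonResourceAttributes is filled in:

    package main

    import (
        "fmt"

        authzv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
    )

    func main() {
        // Hypothetical request: may "jane" (in group "developers") list pods
        // in namespace "demo"?
        review := authzv1beta1.SubjectAccessReview{
            Spec: authzv1beta1.SubjectAccessReviewSpec{
                ResourceAttributes: &authzv1beta1.ResourceAttributes{
                    Namespace: "demo",
                    Verb:      "list",
                    Resource:  "pods",
                },
                User:   "jane",
                Groups: []string{"developers"},
            },
        }

        // Status is filled in by the server; locally it is just the zero value.
        fmt.Printf("allowed=%v reason=%q\n", review.Status.Allowed, review.Status.Reason)
    }
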
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..dc93476
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_LocalSubjectAccessReview = map[string]string{
+ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
+ "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
+ return map_LocalSubjectAccessReview
+}
+
+var map_NonResourceAttributes = map[string]string{
+ "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface",
+ "path": "Path is the URL path of the request",
+ "verb": "Verb is the standard HTTP verb",
+}
+
+func (NonResourceAttributes) SwaggerDoc() map[string]string {
+ return map_NonResourceAttributes
+}
+
+var map_ResourceAttributes = map[string]string{
+ "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface",
+ "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
+ "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.",
+ "group": "Group is the API Group of the Resource. \"*\" means all.",
+ "version": "Version is the API Version of the Resource. \"*\" means all.",
+ "resource": "Resource is one of the existing resource types. \"*\" means all.",
+ "subresource": "Subresource is one of the existing resource types. \"\" means none.",
+ "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
+}
+
+func (ResourceAttributes) SwaggerDoc() map[string]string {
+ return map_ResourceAttributes
+}
+
+var map_SelfSubjectAccessReview = map[string]string{
+ "": "SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action",
+ "spec": "Spec holds information about the request being evaluated. user and groups must be empty",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (SelfSubjectAccessReview) SwaggerDoc() map[string]string {
+ return map_SelfSubjectAccessReview
+}
+
+var map_SelfSubjectAccessReviewSpec = map[string]string{
+ "": "SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set",
+ "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request",
+ "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request",
+}
+
+func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string {
+ return map_SelfSubjectAccessReviewSpec
+}
+
+var map_SubjectAccessReview = map[string]string{
+ "": "SubjectAccessReview checks whether or not a user or group can perform an action.",
+ "spec": "Spec holds information about the request being evaluated",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+}
+
+func (SubjectAccessReview) SwaggerDoc() map[string]string {
+ return map_SubjectAccessReview
+}
+
+var map_SubjectAccessReviewSpec = map[string]string{
+ "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set",
+ "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request",
+ "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request",
+ "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then it is interpreted as \"What if User were not a member of any groups?\".",
+ "group": "Groups is the groups you're testing for.",
+ "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.",
+}
+
+func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string {
+ return map_SubjectAccessReviewSpec
+}
+
+var map_SubjectAccessReviewStatus = map[string]string{
+ "": "SubjectAccessReviewStatus",
+ "allowed": "Allowed is required. True if the action would be allowed, false otherwise.",
+ "reason": "Reason is optional. It indicates why a request was allowed or denied.",
+}
+
+func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string {
+ return map_SubjectAccessReviewStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
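
The maps above are keyed by JSON field name, with the empty key holding the type-level description; go-restful reads them through the SwaggerDoc methods when generating API documentation, and they can be read the same way directly. A small sketch, assuming the vendored package path:

    package main

    import (
        "fmt"

        authzv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
    )

    func main() {
        docs := authzv1beta1.ResourceAttributes{}.SwaggerDoc()
        fmt.Println(docs[""])     // type-level description
        fmt.Println(docs["verb"]) // per-field description
    }
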
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go
new file mode 100644
index 0000000..1ddcc13
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go
@@ -0,0 +1,149 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package autoscaling
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_autoscaling_CrossVersionObjectReference,
+ DeepCopy_autoscaling_HorizontalPodAutoscaler,
+ DeepCopy_autoscaling_HorizontalPodAutoscalerList,
+ DeepCopy_autoscaling_HorizontalPodAutoscalerSpec,
+ DeepCopy_autoscaling_HorizontalPodAutoscalerStatus,
+ DeepCopy_autoscaling_Scale,
+ DeepCopy_autoscaling_ScaleSpec,
+ DeepCopy_autoscaling_ScaleStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_autoscaling_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ return nil
+}
+
+func DeepCopy_autoscaling_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]HorizontalPodAutoscaler, len(in))
+ for i := range in {
+ if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
+ out.ScaleTargetRef = in.ScaleTargetRef
+ if in.MinReplicas != nil {
+ in, out := in.MinReplicas, &out.MinReplicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.MinReplicas = nil
+ }
+ out.MaxReplicas = in.MaxReplicas
+ if in.TargetCPUUtilizationPercentage != nil {
+ in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.TargetCPUUtilizationPercentage = nil
+ }
+ return nil
+}
+
+func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
+ if in.ObservedGeneration != nil {
+ in, out := in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ObservedGeneration = nil
+ }
+ if in.LastScaleTime != nil {
+ in, out := in.LastScaleTime, &out.LastScaleTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.LastScaleTime = nil
+ }
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ if in.CurrentCPUUtilizationPercentage != nil {
+ in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.CurrentCPUUtilizationPercentage = nil
+ }
+ return nil
+}
+
+func DeepCopy_autoscaling_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_autoscaling_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func DeepCopy_autoscaling_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ out.Selector = in.Selector
+ return nil
+}
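
The init block registers these functions on api.Scheme, which calls them through its cloner, but they can also be invoked directly. A sketch with made-up values, assuming conversion.NewCloner is available in this vendored tree (the copy function shown never dereferences the cloner for these fields):

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apis/autoscaling"
        "k8s.io/kubernetes/pkg/conversion"
    )

    func main() {
        min := int32(2)
        in := autoscaling.HorizontalPodAutoscalerSpec{MinReplicas: &min, MaxReplicas: 10}

        var out autoscaling.HorizontalPodAutoscalerSpec
        if err := autoscaling.DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in, &out, conversion.NewCloner()); err != nil {
            panic(err)
        }

        // MinReplicas is copied into a fresh allocation, so mutating the copy
        // leaves the original untouched.
        *out.MinReplicas = 5
        fmt.Println(*in.MinReplicas, *out.MinReplicas) // 2 5
    }
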
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
new file mode 100644
index 0000000..2c77018
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package autoscaling
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go
new file mode 100644
index 0000000..be236ff
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the experimental API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/autoscaling"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", autoscaling.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString()
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version
+// string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(autoscaling.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ autoscaling.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1.SchemeGroupVersion:
+ v1.AddToScheme(api.Scheme)
+ }
+ }
+}
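
The install package is imported only for its side effects: init registers the group's versions and wires them into api.Scheme. Mirroring how the other vendored API groups are pulled in, that is done with a blank import; a short sketch:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apimachinery/registered"

        // Imported for side effects only: registers the autoscaling group
        // and its enabled external versions.
        _ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
    )

    func main() {
        g, err := registered.Group("autoscaling")
        if err != nil {
            panic(err)
        }
        // Preferred external version for the group, e.g. autoscaling/v1.
        fmt.Println(g.GroupVersion)
    }
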
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
new file mode 100644
index 0000000..ee0d3c7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package autoscaling
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "autoscaling"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Scale{},
+ &HorizontalPodAutoscaler{},
+ &HorizontalPodAutoscalerList{},
+ &api.ListOptions{},
+ )
+}
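
Kind and Resource simply qualify a bare name with the autoscaling group, which is what callers use when building GroupKind/GroupResource values for errors and REST mapping. A short usage sketch:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apis/autoscaling"
    )

    func main() {
        gk := autoscaling.Kind("HorizontalPodAutoscaler")
        gr := autoscaling.Resource("horizontalpodautoscalers")
        // Both carry Group "autoscaling" alongside the supplied name.
        fmt.Println(gk.Group, gk.Kind)
        fmt.Println(gr.Group, gr.Resource)
    }
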
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go
new file mode 100644
index 0000000..ded7227
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go
@@ -0,0 +1,2659 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package autoscaling
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_api "k8s.io/kubernetes/pkg/api"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_api.ObjectMeta
+ var v1 pkg1_unversioned.TypeMeta
+ var v2 pkg3_types.UID
+ var v3 time.Time
+ _, _, _, _ = v0, v1, v2, v3
+ }
+}
+
+func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Selector != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Selector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Selector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = ""
+ } else {
+ x.Selector = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = ""
+ } else {
+ x.Selector = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.MinReplicas != nil
+ yyq2[3] = x.TargetCPUUtilizationPercentage != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.ScaleTargetRef
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ScaleTargetRef
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MinReplicas == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.MinReplicas
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MinReplicas == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.MinReplicas
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.TargetCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy17 := *x.TargetCPUUtilizationPercentage
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeInt(int64(yy17))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TargetCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy19 := *x.TargetCPUUtilizationPercentage
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(yy19))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "scaleTargetRef":
+ if r.TryDecodeAsNil() {
+ x.ScaleTargetRef = CrossVersionObjectReference{}
+ } else {
+ yyv4 := &x.ScaleTargetRef
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "minReplicas":
+ if r.TryDecodeAsNil() {
+ if x.MinReplicas != nil {
+ x.MinReplicas = nil
+ }
+ } else {
+ if x.MinReplicas == nil {
+ x.MinReplicas = new(int32)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "maxReplicas":
+ if r.TryDecodeAsNil() {
+ x.MaxReplicas = 0
+ } else {
+ x.MaxReplicas = int32(r.DecodeInt(32))
+ }
+ case "targetCPUUtilizationPercentage":
+ if r.TryDecodeAsNil() {
+ if x.TargetCPUUtilizationPercentage != nil {
+ x.TargetCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.TargetCPUUtilizationPercentage == nil {
+ x.TargetCPUUtilizationPercentage = new(int32)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ScaleTargetRef = CrossVersionObjectReference{}
+ } else {
+ yyv11 := &x.ScaleTargetRef
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.MinReplicas != nil {
+ x.MinReplicas = nil
+ }
+ } else {
+ if x.MinReplicas == nil {
+ x.MinReplicas = new(int32)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxReplicas = 0
+ } else {
+ x.MaxReplicas = int32(r.DecodeInt(32))
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TargetCPUUtilizationPercentage != nil {
+ x.TargetCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.TargetCPUUtilizationPercentage == nil {
+ x.TargetCPUUtilizationPercentage = new(int32)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != nil
+ yyq2[1] = x.LastScaleTime != nil
+ yyq2[4] = x.CurrentCPUUtilizationPercentage != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.ObservedGeneration
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.ObservedGeneration
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.LastScaleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
+ } else if yym9 {
+ z.EncBinaryMarshal(x.LastScaleTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScaleTime)
+ } else {
+ z.EncFallback(x.LastScaleTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LastScaleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.LastScaleTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScaleTime)
+ } else {
+ z.EncFallback(x.LastScaleTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.CurrentCPUUtilizationPercentage
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CurrentCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.CurrentCPUUtilizationPercentage
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(yy20))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "lastScaleTime":
+ if r.TryDecodeAsNil() {
+ if x.LastScaleTime != nil {
+ x.LastScaleTime = nil
+ }
+ } else {
+ if x.LastScaleTime == nil {
+ x.LastScaleTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.LastScaleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScaleTime)
+ } else {
+ z.DecFallback(x.LastScaleTime, false)
+ }
+ }
+ case "currentReplicas":
+ if r.TryDecodeAsNil() {
+ x.CurrentReplicas = 0
+ } else {
+ x.CurrentReplicas = int32(r.DecodeInt(32))
+ }
+ case "desiredReplicas":
+ if r.TryDecodeAsNil() {
+ x.DesiredReplicas = 0
+ } else {
+ x.DesiredReplicas = int32(r.DecodeInt(32))
+ }
+ case "currentCPUUtilizationPercentage":
+ if r.TryDecodeAsNil() {
+ if x.CurrentCPUUtilizationPercentage != nil {
+ x.CurrentCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ x.CurrentCPUUtilizationPercentage = new(int32)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LastScaleTime != nil {
+ x.LastScaleTime = nil
+ }
+ } else {
+ if x.LastScaleTime == nil {
+ x.LastScaleTime = new(pkg1_unversioned.Time)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(x.LastScaleTime)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScaleTime)
+ } else {
+ z.DecFallback(x.LastScaleTime, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentReplicas = 0
+ } else {
+ x.CurrentReplicas = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredReplicas = 0
+ } else {
+ x.DesiredReplicas = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CurrentCPUUtilizationPercentage != nil {
+ x.CurrentCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ x.CurrentCPUUtilizationPercentage = new(int32)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = HorizontalPodAutoscalerSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = HorizontalPodAutoscalerStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = HorizontalPodAutoscalerSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = HorizontalPodAutoscalerStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HorizontalPodAutoscaler{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HorizontalPodAutoscaler, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HorizontalPodAutoscaler, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HorizontalPodAutoscaler{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HorizontalPodAutoscaler{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
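
The decSliceHorizontalPodAutoscaler helper above follows the usual ugorji/go-codec pattern for decoding into an existing slice: reuse the current backing array when it is large enough, grow it otherwise, and write the slice header back only if it actually changed. A minimal, self-contained sketch of that reuse-or-grow logic, using a plain []int so it runs without the codec dependency (resizeForDecode is a hypothetical name, not part of the generated code):

```go
package main

import "fmt"

// resizeForDecode mirrors the reuse-or-grow pattern of the generated slice
// decoder: keep the existing backing array when it already has enough
// capacity, allocate (and copy) otherwise, and report whether the caller's
// slice header needs to be written back.
func resizeForDecode(dst []int, want int) (out []int, changed bool) {
	switch {
	case want == 0:
		if dst == nil {
			return []int{}, true
		}
		if len(dst) != 0 {
			return dst[:0], true
		}
		return dst, false
	case want <= cap(dst):
		if want != len(dst) {
			return dst[:want], true
		}
		return dst, false
	default:
		out = make([]int, want)
		copy(out, dst)
		return out, true
	}
}

func main() {
	s := make([]int, 2, 8)
	s2, changed := resizeForDecode(s, 5)
	fmt.Println(len(s2), cap(s2), changed) // 5 8 true — same backing array, new length
}
```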
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
new file mode 100644
index 0000000..6accc9f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package autoscaling
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec ScaleSpec `json:"spec,omitempty"`
+
+ // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ Status ScaleStatus `json:"status,omitempty"`
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+type ScaleSpec struct {
+ // desired number of instances for the scaled object.
+ Replicas int32 `json:"replicas,omitempty"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+ // actual number of observed instances of the scaled object.
+ Replicas int32 `json:"replicas"`
+
+ // label query over pods that should match the replicas count. This is the same
+ // as the label selector but in the string format, to avoid introspection
+ // by clients. The string will be in the same format as the query-param syntax.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector string `json:"selector,omitempty"`
+}
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+ // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+ // API version of the referent
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+}
+
+// specification of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerSpec struct {
+ // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+ // and will set the desired number of pods by using its Scale subresource.
+ ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef"`
+ // lower limit for the number of pods that can be set by the autoscaler, default 1.
+ MinReplicas *int32 `json:"minReplicas,omitempty"`
+ // upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas.
+ MaxReplicas int32 `json:"maxReplicas"`
+ // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+ // if not specified the default autoscaling policy will be used.
+ TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"`
+}
+
+// current status of a horizontal pod autoscaler
+type HorizontalPodAutoscalerStatus struct {
+ // most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+
+ // last time the HorizontalPodAutoscaler scaled the number of pods;
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"`
+
+ // current number of replicas of pods managed by this autoscaler.
+ CurrentReplicas int32 `json:"currentReplicas"`
+
+ // desired number of replicas of pods managed by this autoscaler.
+ DesiredReplicas int32 `json:"desiredReplicas"`
+
+ // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+ // e.g. 70 means that an average pod is now using 70% of its requested CPU.
+ CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"`
+}
+
+// +genclient=true
+
+// configuration of a horizontal pod autoscaler.
+type HorizontalPodAutoscaler struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"`
+
+ // current information about the autoscaler.
+ Status HorizontalPodAutoscalerStatus `json:"status,omitempty"`
+}
+
+// list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // list of horizontal pod autoscaler objects.
+ Items []HorizontalPodAutoscaler `json:"items"`
+}
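
These internal types carry json tags, so a populated object can be inspected directly with encoding/json. A rough sketch, assuming it is compiled inside this vendored tree so the k8s.io/kubernetes import paths resolve; the target name "example-deployment" is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

func main() {
	minReplicas := int32(2)
	targetCPU := int32(80)
	hpa := autoscaling.HorizontalPodAutoscaler{
		Spec: autoscaling.HorizontalPodAutoscalerSpec{
			// ScaleTargetRef points the autoscaler at the object whose
			// Scale subresource it should drive.
			ScaleTargetRef: autoscaling.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "example-deployment", // placeholder name
				APIVersion: "extensions/v1beta1",
			},
			MinReplicas:                    &minReplicas, // *int32: nil means "use the default"
			MaxReplicas:                    10,
			TargetCPUUtilizationPercentage: &targetCPU,
		},
	}
	out, err := json.MarshalIndent(hpa, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```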
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go
new file mode 100644
index 0000000..3d56992
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go
@@ -0,0 +1,300 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference,
+ Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference,
+ Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler,
+ Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler,
+ Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList,
+ Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList,
+ Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec,
+ Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec,
+ Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus,
+ Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus,
+ Convert_v1_Scale_To_autoscaling_Scale,
+ Convert_autoscaling_Scale_To_v1_Scale,
+ Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec,
+ Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec,
+ Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus,
+ Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ return nil
+}
+
+func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
+ return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s)
+}
+
+func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ return nil
+}
+
+func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error {
+ return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s)
+}
+
+func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
+ SetDefaults_HorizontalPodAutoscaler(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
+ for i := range *in {
+ if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
+ return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HorizontalPodAutoscaler, len(*in))
+ for i := range *in {
+ if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s)
+}
+
+func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
+ if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
+ return err
+ }
+ out.MinReplicas = in.MinReplicas
+ out.MaxReplicas = in.MaxReplicas
+ out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage
+ return nil
+}
+
+func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
+ return autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error {
+ if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
+ return err
+ }
+ out.MinReplicas = in.MinReplicas
+ out.MaxReplicas = in.MaxReplicas
+ out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in, out, s)
+}
+
+func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.LastScaleTime = in.LastScaleTime
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage
+ return nil
+}
+
+func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ return autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.LastScaleTime = in.LastScaleTime
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in, out, s)
+}
+
+func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error {
+ return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s)
+}
+
+func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error {
+ return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s)
+}
+
+func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
+ return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s)
+}
+
+func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
+ return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ out.Selector = in.Selector
+ return nil
+}
+
+func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
+ return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s)
+}
+
+func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ out.Selector = in.Selector
+ return nil
+}
+
+func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error {
+ return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s)
+}
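
In normal use these converters are invoked through api.Scheme, but the simple ones can be called directly. A sketch, assuming the vendored import paths resolve; the generated converter for CrossVersionObjectReference only copies scalar fields and never touches its Scope argument, so nil is passed purely for illustration:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/autoscaling"
	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	in := autoscalingv1.CrossVersionObjectReference{
		Kind:       "Deployment",
		Name:       "example", // placeholder name
		APIVersion: "extensions/v1beta1",
	}
	var out autoscaling.CrossVersionObjectReference
	// This particular generated converter ignores the conversion.Scope, so nil
	// is safe here; converters that handle ObjectMeta do need a real scope.
	if err := autoscalingv1.Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```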
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go
new file mode 100644
index 0000000..af0245e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go
@@ -0,0 +1,150 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ api_v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1_CrossVersionObjectReference,
+ DeepCopy_v1_HorizontalPodAutoscaler,
+ DeepCopy_v1_HorizontalPodAutoscalerList,
+ DeepCopy_v1_HorizontalPodAutoscalerSpec,
+ DeepCopy_v1_HorizontalPodAutoscalerStatus,
+ DeepCopy_v1_Scale,
+ DeepCopy_v1_ScaleSpec,
+ DeepCopy_v1_ScaleStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ return nil
+}
+
+func DeepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]HorizontalPodAutoscaler, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
+ out.ScaleTargetRef = in.ScaleTargetRef
+ if in.MinReplicas != nil {
+ in, out := in.MinReplicas, &out.MinReplicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.MinReplicas = nil
+ }
+ out.MaxReplicas = in.MaxReplicas
+ if in.TargetCPUUtilizationPercentage != nil {
+ in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.TargetCPUUtilizationPercentage = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
+ if in.ObservedGeneration != nil {
+ in, out := in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ObservedGeneration = nil
+ }
+ if in.LastScaleTime != nil {
+ in, out := in.LastScaleTime, &out.LastScaleTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.LastScaleTime = nil
+ }
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ if in.CurrentCPUUtilizationPercentage != nil {
+ in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.CurrentCPUUtilizationPercentage = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func DeepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ out.Selector = in.Selector
+ return nil
+}
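
The generated deep copies allocate fresh memory for pointer fields, so the copy never aliases the original. A sketch using DeepCopy_v1_HorizontalPodAutoscalerSpec (which does not use its Cloner argument, so nil is passed for illustration), assuming the vendored import paths resolve:

```go
package main

import (
	"fmt"

	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	min := int32(2)
	in := autoscalingv1.HorizontalPodAutoscalerSpec{MinReplicas: &min, MaxReplicas: 5}

	var out autoscalingv1.HorizontalPodAutoscalerSpec
	// MinReplicas is copied into a freshly allocated *int32, so mutating the
	// copy below does not touch the original.
	if err := autoscalingv1.DeepCopy_v1_HorizontalPodAutoscalerSpec(in, &out, nil); err != nil {
		panic(err)
	}
	*out.MinReplicas = 3
	fmt.Println(*in.MinReplicas, *out.MinReplicas) // 2 3
}
```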
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go
new file mode 100644
index 0000000..aacf552
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_HorizontalPodAutoscaler,
+ )
+}
+
+func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) {
+ if obj.Spec.MinReplicas == nil {
+ minReplicas := int32(1)
+ obj.Spec.MinReplicas = &minReplicas
+ }
+}
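
Because MinReplicas is a *int32, the defaulter can tell "unset" apart from an explicit value and fills in 1 only when the field is nil. A quick sketch of calling the defaulter directly, assuming the vendored import paths resolve:

```go
package main

import (
	"fmt"

	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	hpa := autoscalingv1.HorizontalPodAutoscaler{} // Spec.MinReplicas left nil
	autoscalingv1.SetDefaults_HorizontalPodAutoscaler(&hpa)
	fmt.Println(*hpa.Spec.MinReplicas) // 1
}
```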
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go
new file mode 100644
index 0000000..be1c70f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling
+
+package v1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go
new file mode 100644
index 0000000..f44b843
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go
@@ -0,0 +1,1612 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto
+
+ It has these top-level messages:
+ CrossVersionObjectReference
+ HorizontalPodAutoscaler
+ HorizontalPodAutoscalerList
+ HorizontalPodAutoscalerSpec
+ HorizontalPodAutoscalerStatus
+ Scale
+ ScaleSpec
+ ScaleStatus
+*/
+package v1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
+func (m *CrossVersionObjectReference) String() string { return proto.CompactTextString(m) }
+func (*CrossVersionObjectReference) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
+func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscaler) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
+func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscalerList) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
+func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
+func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
+
+func (m *Scale) Reset() { *m = Scale{} }
+func (m *Scale) String() string { return proto.CompactTextString(m) }
+func (*Scale) ProtoMessage() {}
+
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
+func (m *ScaleSpec) String() string { return proto.CompactTextString(m) }
+func (*ScaleSpec) ProtoMessage() {}
+
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
+func (m *ScaleStatus) String() string { return proto.CompactTextString(m) }
+func (*ScaleStatus) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference")
+ proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler")
+ proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList")
+ proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec")
+ proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus")
+ proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.Scale")
+ proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec")
+ proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus")
+}
+func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n4, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size()))
+ n5, err := m.ScaleTargetRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ if m.MinReplicas != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas))
+ }
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas))
+ if m.TargetCPUUtilizationPercentage != nil {
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage))
+ }
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ObservedGeneration != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration))
+ }
+ if m.LastScaleTime != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size()))
+ n6, err := m.LastScaleTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas))
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas))
+ if m.CurrentCPUUtilizationPercentage != nil {
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage))
+ }
+ return i, nil
+}
+
+func (m *Scale) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Scale) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n7, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n8, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n9, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ return i, nil
+}
+
+func (m *ScaleSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScaleSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ return i, nil
+}
+
+func (m *ScaleStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScaleStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Selector)))
+ i += copy(data[i:], m.Selector)
+ return i, nil
+}
+
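The literal bytes written before each field above (0xa, 0x12, 0x1a, 0x8, ...) are protobuf field keys: (field_number << 3) | wire_type, where wire type 2 is length-delimited and wire type 0 is varint. A tiny sketch of how those constants come about (protoKey is a hypothetical helper, not part of the generated code):

```go
package main

import "fmt"

// protoKey builds a protobuf field key byte: field number in the high bits,
// wire type in the low three bits.
func protoKey(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// Length-delimited (wire type 2) fields 1, 2 and 3 give the 0xa, 0x12 and
	// 0x1a tags hard-coded by the marshalers above; a varint (wire type 0)
	// field 1 gives the 0x8 tag used for Replicas.
	fmt.Printf("%#x %#x %#x %#x\n", protoKey(1, 2), protoKey(2, 2), protoKey(3, 2), protoKey(1, 0))
}
```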
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *CrossVersionObjectReference) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *HorizontalPodAutoscaler) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *HorizontalPodAutoscalerList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscalerSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ScaleTargetRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.MinReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.MinReplicas))
+ }
+ n += 1 + sovGenerated(uint64(m.MaxReplicas))
+ if m.TargetCPUUtilizationPercentage != nil {
+ n += 1 + sovGenerated(uint64(*m.TargetCPUUtilizationPercentage))
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscalerStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.ObservedGeneration != nil {
+ n += 1 + sovGenerated(uint64(*m.ObservedGeneration))
+ }
+ if m.LastScaleTime != nil {
+ l = m.LastScaleTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.CurrentReplicas))
+ n += 1 + sovGenerated(uint64(m.DesiredReplicas))
+ if m.CurrentCPUUtilizationPercentage != nil {
+ n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage))
+ }
+ return n
+}
+
+func (m *Scale) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ScaleSpec) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ return n
+}
+
+func (m *ScaleStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ l = len(m.Selector)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
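
encodeVarintGenerated and sovGenerated implement standard base-128 varints: seven payload bits per byte, least-significant group first, with the high bit of each byte marking continuation. A self-contained sketch of the same encoding and its size calculation (putUvarint and uvarintSize are hypothetical names mirroring the generated helpers):

```go
package main

import "fmt"

// putUvarint mirrors encodeVarintGenerated: little-endian base-128 with the
// high bit of each byte marking continuation.
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// uvarintSize mirrors sovGenerated: the number of bytes putUvarint emits.
func uvarintSize(x uint64) int {
	n := 0
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	v := uint64(300)
	buf := make([]byte, uvarintSize(v))
	putUvarint(buf, 0, v)
	fmt.Printf("% x\n", buf) // ac 02
}
```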
+func (m *CrossVersionObjectReference) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, HorizontalPodAutoscaler{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MinReplicas = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType)
+ }
+ m.MaxReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.MaxReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetCPUUtilizationPercentage = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ObservedGeneration = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastScaleTime == nil {
+ m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType)
+ }
+ m.CurrentReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.CurrentReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType)
+ }
+ m.DesiredReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.DesiredReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CurrentCPUUtilizationPercentage = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Scale) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Scale: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScaleSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScaleStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selector = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
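
Every Unmarshal method above repeats the same base-128 varint loop: each byte contributes its low seven bits, and a byte below 0x80 terminates the value. The following standalone sketch isolates that pattern; the decodeVarint helper and its error values are illustrative only and are not part of the vendored file.

package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("varint: integer overflow")
	errUnexpectedEOF = errors.New("varint: unexpected EOF")
)

// decodeVarint mirrors the shift-by-7 loop used throughout the generated
// Unmarshal methods: each byte supplies 7 payload bits, and a byte below
// 0x80 marks the end of the value.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if n >= len(data) {
			return 0, 0, errUnexpectedEOF
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 300 is encoded as 0xAC 0x02 on the protobuf wire.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}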
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto
new file mode 100644
index 0000000..7730d6e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto
@@ -0,0 +1,131 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.autoscaling.v1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+message CrossVersionObjectReference {
+ // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional string kind = 1;
+
+ // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ optional string name = 2;
+
+ // API version of the referent
+ optional string apiVersion = 3;
+}
+
+// configuration of a horizontal pod autoscaler.
+message HorizontalPodAutoscaler {
+ // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ optional HorizontalPodAutoscalerSpec spec = 2;
+
+ // current information about the autoscaler.
+ optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+ // Standard list metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // list of horizontal pod autoscaler objects.
+ repeated HorizontalPodAutoscaler items = 2;
+}
+
+// specification of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerSpec {
+ // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+ // and will set the desired number of pods by using its Scale subresource.
+ optional CrossVersionObjectReference scaleTargetRef = 1;
+
+ // lower limit for the number of pods that can be set by the autoscaler, default 1.
+ optional int32 minReplicas = 2;
+
+ // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ optional int32 maxReplicas = 3;
+
+ // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+ // if not specified the default autoscaling policy will be used.
+ optional int32 targetCPUUtilizationPercentage = 4;
+}
+
+// current status of a horizontal pod autoscaler
+message HorizontalPodAutoscalerStatus {
+ // most recent generation observed by this autoscaler.
+ optional int64 observedGeneration = 1;
+
+ // last time the HorizontalPodAutoscaler scaled the number of pods;
+ // used by the autoscaler to control how often the number of pods is changed.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2;
+
+ // current number of replicas of pods managed by this autoscaler.
+ optional int32 currentReplicas = 3;
+
+ // desired number of replicas of pods managed by this autoscaler.
+ optional int32 desiredReplicas = 4;
+
+ // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+ // e.g. 70 means that an average pod is now using 70% of its requested CPU.
+ optional int32 currentCPUUtilizationPercentage = 5;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+ // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ optional ScaleSpec spec = 2;
+
+ // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+message ScaleSpec {
+ // desired number of instances for the scaled object.
+ optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+ // actual number of observed instances of the scaled object.
+ optional int32 replicas = 1;
+
+ // label query over pods that should match the replicas count. This is same
+ // as the label selector but in the string format to avoid introspection
+ // by clients. The string will be in the same format as the query-param syntax.
+ // More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional string selector = 2;
+}
+
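
These message definitions are what the hand-rolled Unmarshal methods earlier in this patch decode. A minimal usage sketch for ScaleSpec follows; the two wire bytes are hand-encoded here, and the import assumes this vendored path resolves in the tree's GOPATH layout.

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	// Tag 0x08 is field 1 (replicas) with wire type 0 (varint); 0x03 is the value.
	raw := []byte{0x08, 0x03}

	var spec autoscalingv1.ScaleSpec
	if err := spec.Unmarshal(raw); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("replicas:", spec.Replicas) // replicas: 3
}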
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go
new file mode 100644
index 0000000..93d9535
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "autoscaling"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &HorizontalPodAutoscaler{},
+ &HorizontalPodAutoscalerList{},
+ &Scale{},
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ )
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
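
AddToScheme wires these kinds, plus the versioned list/delete options and watch helpers, into a runtime.Scheme. Below is a minimal sketch of calling it, assuming runtime.NewScheme is available in this vendored snapshot as the usual scheme constructor (it is not shown in this patch).

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Register HorizontalPodAutoscaler, HorizontalPodAutoscalerList and Scale
	// under the autoscaling/v1 group version on a fresh scheme.
	scheme := runtime.NewScheme()
	autoscalingv1.AddToScheme(scheme)

	fmt.Println("registered:", autoscalingv1.SchemeGroupVersion) // autoscaling/v1
}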
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go
new file mode 100644
index 0000000..ab9ebc5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go
@@ -0,0 +1,2659 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.Time
+ var v1 pkg2_v1.ObjectMeta
+ var v2 pkg3_types.UID
+ var v3 time.Time
+ _, _, _, _ = v0, v1, v2, v3
+ }
+}
+
+func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.MinReplicas != nil
+ yyq2[3] = x.TargetCPUUtilizationPercentage != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.ScaleTargetRef
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ScaleTargetRef
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MinReplicas == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.MinReplicas
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MinReplicas == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.MinReplicas
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.TargetCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy17 := *x.TargetCPUUtilizationPercentage
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeInt(int64(yy17))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TargetCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy19 := *x.TargetCPUUtilizationPercentage
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(yy19))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "scaleTargetRef":
+ if r.TryDecodeAsNil() {
+ x.ScaleTargetRef = CrossVersionObjectReference{}
+ } else {
+ yyv4 := &x.ScaleTargetRef
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "minReplicas":
+ if r.TryDecodeAsNil() {
+ if x.MinReplicas != nil {
+ x.MinReplicas = nil
+ }
+ } else {
+ if x.MinReplicas == nil {
+ x.MinReplicas = new(int32)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "maxReplicas":
+ if r.TryDecodeAsNil() {
+ x.MaxReplicas = 0
+ } else {
+ x.MaxReplicas = int32(r.DecodeInt(32))
+ }
+ case "targetCPUUtilizationPercentage":
+ if r.TryDecodeAsNil() {
+ if x.TargetCPUUtilizationPercentage != nil {
+ x.TargetCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.TargetCPUUtilizationPercentage == nil {
+ x.TargetCPUUtilizationPercentage = new(int32)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ScaleTargetRef = CrossVersionObjectReference{}
+ } else {
+ yyv11 := &x.ScaleTargetRef
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.MinReplicas != nil {
+ x.MinReplicas = nil
+ }
+ } else {
+ if x.MinReplicas == nil {
+ x.MinReplicas = new(int32)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxReplicas = 0
+ } else {
+ x.MaxReplicas = int32(r.DecodeInt(32))
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TargetCPUUtilizationPercentage != nil {
+ x.TargetCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.TargetCPUUtilizationPercentage == nil {
+ x.TargetCPUUtilizationPercentage = new(int32)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != nil
+ yyq2[1] = x.LastScaleTime != nil
+ yyq2[4] = x.CurrentCPUUtilizationPercentage != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.ObservedGeneration
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.ObservedGeneration
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.LastScaleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
+ } else if yym9 {
+ z.EncBinaryMarshal(x.LastScaleTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScaleTime)
+ } else {
+ z.EncFallback(x.LastScaleTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LastScaleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.LastScaleTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScaleTime)
+ } else {
+ z.EncFallback(x.LastScaleTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.CurrentCPUUtilizationPercentage
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CurrentCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.CurrentCPUUtilizationPercentage
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(yy20))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "lastScaleTime":
+ if r.TryDecodeAsNil() {
+ if x.LastScaleTime != nil {
+ x.LastScaleTime = nil
+ }
+ } else {
+ if x.LastScaleTime == nil {
+ x.LastScaleTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.LastScaleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScaleTime)
+ } else {
+ z.DecFallback(x.LastScaleTime, false)
+ }
+ }
+ case "currentReplicas":
+ if r.TryDecodeAsNil() {
+ x.CurrentReplicas = 0
+ } else {
+ x.CurrentReplicas = int32(r.DecodeInt(32))
+ }
+ case "desiredReplicas":
+ if r.TryDecodeAsNil() {
+ x.DesiredReplicas = 0
+ } else {
+ x.DesiredReplicas = int32(r.DecodeInt(32))
+ }
+ case "currentCPUUtilizationPercentage":
+ if r.TryDecodeAsNil() {
+ if x.CurrentCPUUtilizationPercentage != nil {
+ x.CurrentCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ x.CurrentCPUUtilizationPercentage = new(int32)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LastScaleTime != nil {
+ x.LastScaleTime = nil
+ }
+ } else {
+ if x.LastScaleTime == nil {
+ x.LastScaleTime = new(pkg1_unversioned.Time)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(x.LastScaleTime)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScaleTime)
+ } else {
+ z.DecFallback(x.LastScaleTime, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentReplicas = 0
+ } else {
+ x.CurrentReplicas = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredReplicas = 0
+ } else {
+ x.DesiredReplicas = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CurrentCPUUtilizationPercentage != nil {
+ x.CurrentCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ x.CurrentCPUUtilizationPercentage = new(int32)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = HorizontalPodAutoscalerSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = HorizontalPodAutoscalerStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = HorizontalPodAutoscalerSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = HorizontalPodAutoscalerStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Selector != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Selector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Selector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = ""
+ } else {
+ x.Selector = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = ""
+ } else {
+ x.Selector = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HorizontalPodAutoscaler{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HorizontalPodAutoscaler, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HorizontalPodAutoscaler, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HorizontalPodAutoscaler{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HorizontalPodAutoscaler{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
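
The CodecEncodeSelf/CodecDecodeSelf methods generated above are never called by hand; the ugorji codec runtime dispatches to them through its Selfer interface whenever one of these types is handed to an Encoder or Decoder. A minimal round-trip sketch, assuming the vendored github.com/ugorji/go/codec package and this autoscaling/v1 package are importable (the autoscalingv1 alias is only for readability):

package main

import (
	"bytes"
	"fmt"

	codec "github.com/ugorji/go/codec"
	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	list := autoscalingv1.HorizontalPodAutoscalerList{
		Items: []autoscalingv1.HorizontalPodAutoscaler{{}},
	}

	var buf bytes.Buffer
	h := &codec.JsonHandle{} // any codec.Handle takes the same generated path

	// Encoding dispatches to the generated CodecEncodeSelf via the codec.Selfer interface.
	if err := codec.NewEncoder(&buf, h).Encode(&list); err != nil {
		panic(err)
	}

	// Decoding likewise dispatches to the generated CodecDecodeSelf.
	var out autoscalingv1.HorizontalPodAutoscalerList
	if err := codec.NewDecoder(&buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("items round-tripped:", len(out.Items))
}

Any other codec.Handle follows the same generated path; JSON is used here only to keep the buffer human-readable.
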
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go
new file mode 100644
index 0000000..035288c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+)
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+ // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+ // API version of the referent
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+}
+
+// specification of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerSpec struct {
+ // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+ // and will set the desired number of pods by using its Scale subresource.
+ ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
+ // lower limit for the number of pods that can be set by the autoscaler, default 1.
+ MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+ // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+ // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+ // if not specified the default autoscaling policy will be used.
+ TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"`
+}
+
+// current status of a horizontal pod autoscaler
+type HorizontalPodAutoscalerStatus struct {
+ // most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // last time the HorizontalPodAutoscaler scaled the number of pods;
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+ // current number of replicas of pods managed by this autoscaler.
+ CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
+
+ // desired number of replicas of pods managed by this autoscaler.
+ DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+ // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+ // e.g. 70 means that an average pod is using now 70% of its requested CPU.
+ CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"`
+}
+
+// +genclient=true
+
+// configuration of a horizontal pod autoscaler.
+type HorizontalPodAutoscaler struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // current information about the autoscaler.
+ Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // list of horizontal pod autoscaler objects.
+ Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+type ScaleSpec struct {
+ // desired number of instances for the scaled object.
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+ // actual number of observed instances of the scaled object.
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // label query over pods that should match the replicas count. This is the same
+ // as the label selector but in the string format to avoid introspection
+ // by clients. The string will be in the same format as the query-param syntax.
+ // More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+}
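
Since types.go only declares plain structs, wiring an autoscaler together is ordinary struct-literal work. A hedged sketch follows; the Deployment name "web", the namespace, the API version string, and the int32p helper are all illustrative rather than anything defined in this package:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

// int32p is a tiny helper for the *int32 fields (MinReplicas, TargetCPUUtilizationPercentage).
func int32p(i int32) *int32 { return &i }

func main() {
	hpa := autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: v1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "web",
				APIVersion: "extensions/v1beta1",
			},
			MinReplicas:                    int32p(2),
			MaxReplicas:                    10,
			TargetCPUUtilizationPercentage: int32p(70),
		},
	}
	fmt.Printf("%s: %d-%d replicas at %d%% CPU\n",
		hpa.Name, *hpa.Spec.MinReplicas, hpa.Spec.MaxReplicas,
		*hpa.Spec.TargetCPUUtilizationPercentage)
}

The omitempty fields are pointers so that "unset" can be distinguished from an explicit 0, which is why the helper is needed.
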
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..6b9bcf4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_CrossVersionObjectReference = map[string]string{
+ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
+ "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"",
+ "name": "Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+ "apiVersion": "API version of the referent",
+}
+
+func (CrossVersionObjectReference) SwaggerDoc() map[string]string {
+ return map_CrossVersionObjectReference
+}
+
+var map_HorizontalPodAutoscaler = map[string]string{
+ "": "configuration of a horizontal pod autoscaler.",
+ "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
+ "status": "current information about the autoscaler.",
+}
+
+func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscaler
+}
+
+var map_HorizontalPodAutoscalerList = map[string]string{
+ "": "list of horizontal pod autoscaler objects.",
+ "metadata": "Standard list metadata.",
+ "items": "list of horizontal pod autoscaler objects.",
+}
+
+func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerList
+}
+
+var map_HorizontalPodAutoscalerSpec = map[string]string{
+ "": "specification of a horizontal pod autoscaler.",
+ "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.",
+ "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.",
+ "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.",
+ "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.",
+}
+
+func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerSpec
+}
+
+var map_HorizontalPodAutoscalerStatus = map[string]string{
+ "": "current status of a horizontal pod autoscaler",
+ "observedGeneration": "most recent generation observed by this autoscaler.",
+ "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.",
+ "currentReplicas": "current number of replicas of pods managed by this autoscaler.",
+ "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.",
+ "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.",
+}
+
+func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerStatus
+}
+
+var map_Scale = map[string]string{
+ "": "Scale represents a scaling request for a resource.",
+ "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.",
+ "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
+ "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+ return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+ "": "ScaleSpec describes the attributes of a scale subresource.",
+ "replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+ return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+ "": "ScaleStatus represents the current status of a scale subresource.",
+ "replicas": "actual number of observed instances of the scaled object.",
+ "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+ return map_ScaleStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
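
The SwaggerDoc maps above are consumed by go-restful when the API server builds its Swagger spec, but they can also be read directly; the lookup keys below come straight from the maps in this file. A small sketch:

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	// The "" key holds the doc string for the type itself; other keys are JSON field names.
	docs := autoscalingv1.HorizontalPodAutoscalerSpec{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println("maxReplicas:", docs["maxReplicas"])
}
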
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go
new file mode 100644
index 0000000..a6adac4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go
@@ -0,0 +1,258 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package batch
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_batch_Job,
+ DeepCopy_batch_JobCondition,
+ DeepCopy_batch_JobList,
+ DeepCopy_batch_JobSpec,
+ DeepCopy_batch_JobStatus,
+ DeepCopy_batch_JobTemplate,
+ DeepCopy_batch_JobTemplateSpec,
+ DeepCopy_batch_ScheduledJob,
+ DeepCopy_batch_ScheduledJobList,
+ DeepCopy_batch_ScheduledJobSpec,
+ DeepCopy_batch_ScheduledJobStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_batch_Job(in Job, out *Job, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_batch_JobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_batch_JobStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_batch_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastProbeTime = in.LastProbeTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_batch_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Job, len(in))
+ for i := range in {
+ if err := DeepCopy_batch_Job(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_batch_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
+ if in.Parallelism != nil {
+ in, out := in.Parallelism, &out.Parallelism
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Parallelism = nil
+ }
+ if in.Completions != nil {
+ in, out := in.Completions, &out.Completions
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Completions = nil
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ActiveDeadlineSeconds = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ in, out := in.ManualSelector, &out.ManualSelector
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.ManualSelector = nil
+ }
+ if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_batch_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_batch_JobCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.StartTime != nil {
+ in, out := in.StartTime, &out.StartTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.StartTime = nil
+ }
+ if in.CompletionTime != nil {
+ in, out := in.CompletionTime, &out.CompletionTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.CompletionTime = nil
+ }
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func DeepCopy_batch_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_batch_JobTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_batch_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error {
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_batch_JobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_batch_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_batch_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_batch_ScheduledJobStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_batch_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ScheduledJob, len(in))
+ for i := range in {
+ if err := DeepCopy_batch_ScheduledJob(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_batch_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error {
+ out.Schedule = in.Schedule
+ if in.StartingDeadlineSeconds != nil {
+ in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.StartingDeadlineSeconds = nil
+ }
+ out.ConcurrencyPolicy = in.ConcurrencyPolicy
+ if in.Suspend != nil {
+ in, out := in.Suspend, &out.Suspend
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.Suspend = nil
+ }
+ if err := DeepCopy_batch_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_batch_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error {
+ if in.Active != nil {
+ in, out := in.Active, &out.Active
+ *out = make([]api.ObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Active = nil
+ }
+ if in.LastScheduleTime != nil {
+ in, out := in.LastScheduleTime, &out.LastScheduleTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.LastScheduleTime = nil
+ }
+ return nil
+}
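
The generated DeepCopy_batch_* functions can also be called directly. The sketch below copies a Job and mutates only the copy; conversion.NewCloner() is assumed to be the usual constructor for the *conversion.Cloner argument in this vintage of pkg/conversion, and the Parallelism value is illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/conversion"
)

func main() {
	parallelism := int32(3)
	in := batch.Job{Spec: batch.JobSpec{Parallelism: &parallelism}}

	var out batch.Job
	// Assumed: conversion.NewCloner() is how a *conversion.Cloner is normally obtained here.
	if err := batch.DeepCopy_batch_Job(in, &out, conversion.NewCloner()); err != nil {
		panic(err)
	}

	*out.Spec.Parallelism = 10                               // mutate the copy only
	fmt.Println(*in.Spec.Parallelism, *out.Spec.Parallelism) // 3 10
}
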
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go
new file mode 100644
index 0000000..c6b203c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package batch
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go
new file mode 100644
index 0000000..22f92b7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the batch API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/apis/batch/v1"
+ "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/batch"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion, v2alpha1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", batch.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString()
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version
+// string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ case v2alpha1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(batch.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ batch.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1.SchemeGroupVersion:
+ v1.AddToScheme(api.Scheme)
+ case v2alpha1.SchemeGroupVersion:
+ v2alpha1.AddToScheme(api.Scheme)
+ }
+ }
+}
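
As the package comment states, install.go does all of its work in init(), so consumers normally pull it in with a blank import before touching the encoding/decoding machinery. A minimal sketch built on the registered.Group call already used in this file:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apimachinery/registered"
	"k8s.io/kubernetes/pkg/apis/batch"

	// Imported only for its side effect: the init() above registers the batch
	// group and whichever of its external versions (v1, v2alpha1) are enabled.
	_ "k8s.io/kubernetes/pkg/apis/batch/install"
)

func main() {
	groupMeta, err := registered.Group(batch.GroupName)
	if err != nil {
		panic(err)
	}
	fmt.Println("preferred version:", groupMeta.GroupVersion)
}
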
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go
new file mode 100644
index 0000000..641df5f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package batch
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "batch"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Job{},
+ &JobList{},
+ &JobTemplate{},
+ &ScheduledJob{},
+ &ScheduledJobList{},
+ &api.ListOptions{},
+ )
+}
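
The Kind and Resource helpers defined here are how other packages build group-qualified names for the internal batch API; a short illustrative sketch:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/batch"
)

func main() {
	// Both helpers qualify a bare name with the "batch" group; the internal
	// SchemeGroupVersion drops the version when GroupKind()/GroupResource() is taken.
	jobKind := batch.Kind("Job")           // unversioned.GroupKind{Group: "batch", Kind: "Job"}
	jobsResource := batch.Resource("jobs") // unversioned.GroupResource{Group: "batch", Resource: "jobs"}
	fmt.Println(jobKind, jobsResource)
}
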
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go
new file mode 100644
index 0000000..03f0d24
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go
@@ -0,0 +1,4671 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package batch
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_api "k8s.io/kubernetes/pkg/api"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_api.ObjectMeta
+ var v1 pkg4_resource.Quantity
+ var v2 pkg1_unversioned.TypeMeta
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Template
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Template
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = JobTemplateSpec{}
+ } else {
+ yyv5 := &x.Template
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = JobTemplateSpec{}
+ } else {
+ yyv10 := &x.Template
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv7 := &x.ObjectMeta
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv8 := &x.Spec
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Parallelism != nil
+ yyq2[1] = x.Completions != nil
+ yyq2[2] = x.ActiveDeadlineSeconds != nil
+ yyq2[3] = x.Selector != nil
+ yyq2[4] = x.ManualSelector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Parallelism
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("parallelism"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Parallelism
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Completions
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.Completions
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.ActiveDeadlineSeconds
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(yy14))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.ActiveDeadlineSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ManualSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.ManualSelector
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(yy22))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("manualSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ManualSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy24 := *x.ManualSelector
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(yy24))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy27 := &x.Template
+ yy27.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy29 := &x.Template
+ yy29.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "parallelism":
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "completions":
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "activeDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "manualSelector":
+ if r.TryDecodeAsNil() {
+ if x.ManualSelector != nil {
+ x.ManualSelector = nil
+ }
+ } else {
+ if x.ManualSelector == nil {
+ x.ManualSelector = new(bool)
+ }
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*bool)(x.ManualSelector)) = r.DecodeBool()
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv14 := &x.Template
+ yyv14.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj15 int
+ var yyb15 bool
+ var yyhl15 bool = l >= 0
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ManualSelector != nil {
+ x.ManualSelector = nil
+ }
+ } else {
+ if x.ManualSelector == nil {
+ x.ManualSelector = new(bool)
+ }
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else {
+ *((*bool)(x.ManualSelector)) = r.DecodeBool()
+ }
+ }
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv26 := &x.Template
+ yyv26.CodecDecodeSelf(d)
+ }
+ for {
+ yyj15++
+ if yyhl15 {
+ yyb15 = yyj15 > l
+ } else {
+ yyb15 = r.CheckBreak()
+ }
+ if yyb15 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj15-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Conditions) != 0
+ yyq2[1] = x.StartTime != nil
+ yyq2[2] = x.CompletionTime != nil
+ yyq2[3] = x.Active != 0
+ yyq2[4] = x.Succeeded != 0
+ yyq2[5] = x.Failed != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym7 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym8 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym11 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("active"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("succeeded"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv4 := &x.Conditions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv4), d)
+ }
+ }
+ case "startTime":
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ case "completionTime":
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ case "active":
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ case "succeeded":
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ case "failed":
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv14 := &x.Conditions
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv14), d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym17 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym17 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym19 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yysf7 := &x.Status
+ yysf7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yysf8 := &x.Status
+ yysf8.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastProbeTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastProbeTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_api.ConditionStatus(r.DecodeString())
+ }
+ case "lastProbeTime":
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastProbeTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_api.ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastProbeTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScheduledJob) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJob) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ScheduledJobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ScheduledJobStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ScheduledJobSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ScheduledJobStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScheduledJobList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJobList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceScheduledJob((*[]ScheduledJob)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceScheduledJob((*[]ScheduledJob)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.StartingDeadlineSeconds != nil
+ yyq2[2] = x.ConcurrencyPolicy != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Schedule))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("schedule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Schedule))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.StartingDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.StartingDeadlineSeconds
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(yy7))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartingDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.StartingDeadlineSeconds
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.ConcurrencyPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.ConcurrencyPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Suspend == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.Suspend
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeBool(bool(yy15))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("suspend"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Suspend == nil {
+ r.EncodeNil()
+ } else {
+ yy17 := *x.Suspend
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeBool(bool(yy17))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy20 := &x.JobTemplate
+ yy20.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("jobTemplate"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy22 := &x.JobTemplate
+ yy22.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJobSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "schedule":
+ if r.TryDecodeAsNil() {
+ x.Schedule = ""
+ } else {
+ x.Schedule = string(r.DecodeString())
+ }
+ case "startingDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.StartingDeadlineSeconds != nil {
+ x.StartingDeadlineSeconds = nil
+ }
+ } else {
+ if x.StartingDeadlineSeconds == nil {
+ x.StartingDeadlineSeconds = new(int64)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "concurrencyPolicy":
+ if r.TryDecodeAsNil() {
+ x.ConcurrencyPolicy = ""
+ } else {
+ x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString())
+ }
+ case "suspend":
+ if r.TryDecodeAsNil() {
+ if x.Suspend != nil {
+ x.Suspend = nil
+ }
+ } else {
+ if x.Suspend == nil {
+ x.Suspend = new(bool)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*bool)(x.Suspend)) = r.DecodeBool()
+ }
+ }
+ case "jobTemplate":
+ if r.TryDecodeAsNil() {
+ x.JobTemplate = JobTemplateSpec{}
+ } else {
+ yyv10 := &x.JobTemplate
+ yyv10.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Schedule = ""
+ } else {
+ x.Schedule = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartingDeadlineSeconds != nil {
+ x.StartingDeadlineSeconds = nil
+ }
+ } else {
+ if x.StartingDeadlineSeconds == nil {
+ x.StartingDeadlineSeconds = new(int64)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrencyPolicy = ""
+ } else {
+ x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Suspend != nil {
+ x.Suspend = nil
+ }
+ } else {
+ if x.Suspend == nil {
+ x.Suspend = new(bool)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*bool)(x.Suspend)) = r.DecodeBool()
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.JobTemplate = JobTemplateSpec{}
+ } else {
+ yyv18 := &x.JobTemplate
+ yyv18.CodecDecodeSelf(d)
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ScheduledJobStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Active) != 0
+ yyq2[1] = x.LastScheduleTime != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Active == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("active"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Active == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.LastScheduleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) {
+ } else if yym7 {
+ z.EncBinaryMarshal(x.LastScheduleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScheduleTime)
+ } else {
+ z.EncFallback(x.LastScheduleTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LastScheduleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) {
+ } else if yym8 {
+ z.EncBinaryMarshal(x.LastScheduleTime)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScheduleTime)
+ } else {
+ z.EncFallback(x.LastScheduleTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJobStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "active":
+ if r.TryDecodeAsNil() {
+ x.Active = nil
+ } else {
+ yyv4 := &x.Active
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv4), d)
+ }
+ }
+ case "lastScheduleTime":
+ if r.TryDecodeAsNil() {
+ if x.LastScheduleTime != nil {
+ x.LastScheduleTime = nil
+ }
+ } else {
+ if x.LastScheduleTime == nil {
+ x.LastScheduleTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.LastScheduleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScheduleTime)
+ } else {
+ z.DecFallback(x.LastScheduleTime, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Active = nil
+ } else {
+ yyv9 := &x.Active
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LastScheduleTime != nil {
+ x.LastScheduleTime = nil
+ }
+ } else {
+ if x.LastScheduleTime == nil {
+ x.LastScheduleTime = new(pkg1_unversioned.Time)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(x.LastScheduleTime)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScheduleTime)
+ } else {
+ z.DecFallback(x.LastScheduleTime, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Job{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Job{}) // var yyz1 Job
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, JobCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceScheduledJob(v []ScheduledJob, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ScheduledJob{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1000)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ScheduledJob, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ScheduledJob, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ScheduledJob{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ScheduledJob{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ScheduledJob{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ScheduledJob{}) // var yyz1 ScheduledJob
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ScheduledJob{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ScheduledJob{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceapi_ObjectReference(v []pkg2_api.ObjectReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceapi_ObjectReference(v *[]pkg2_api.ObjectReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg2_api.ObjectReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg2_api.ObjectReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg2_api.ObjectReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_api.ObjectReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, pkg2_api.ObjectReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_api.ObjectReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, pkg2_api.ObjectReference{}) // var yyz1 pkg2_api.ObjectReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_api.ObjectReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg2_api.ObjectReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
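For reference, the CodecEncodeSelf/CodecDecodeSelf methods generated above satisfy the ugorji codec Selfer interface, so that library dispatches to them instead of falling back to reflection. A minimal sketch of exercising them directly; the JSON handle choice and the non-vendored import paths are assumptions for illustration, not part of this change:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	"k8s.io/kubernetes/pkg/apis/batch"
)

func main() {
	sj := batch.ScheduledJob{}
	sj.Name = "example"
	sj.Spec.Schedule = "*/5 * * * *"

	// Encoding: the codec library notices that *ScheduledJob implements
	// codec.Selfer and calls the generated CodecEncodeSelf above.
	var buf []byte
	h := &codec.JsonHandle{}
	if err := codec.NewEncoderBytes(&buf, h).Encode(&sj); err != nil {
		panic(err)
	}

	// Decoding goes through the generated CodecDecodeSelf the same way.
	var out batch.ScheduledJob
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Spec.Schedule)
}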
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
new file mode 100644
index 0000000..61a35d4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package batch
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// +genclient=true
+
+// Job represents the configuration of a single job.
+type Job struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty"`
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status JobStatus `json:"status,omitempty"`
+}
+
+// JobList is a collection of jobs.
+type JobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of Job.
+ Items []Job `json:"items"`
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+type JobTemplate struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Template defines jobs that will be created from this template
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Template JobTemplateSpec `json:"template,omitempty"`
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template
+type JobTemplateSpec struct {
+ // Standard object's metadata of the jobs created from this template.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Specification of the desired behavior of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty"`
+}
+
+// JobSpec describes what the job execution will look like.
+type JobSpec struct {
+
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ Parallelism *int32 `json:"parallelism,omitempty"`
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ Completions *int32 `json:"completions,omitempty"`
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; the value must be a positive integer.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+
+ // ManualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ ManualSelector *bool `json:"manualSelector,omitempty"`
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ Template api.PodTemplateSpec `json:"template"`
+}
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+
+ // Conditions represent the latest available observations of an object's current state.
+ Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // StartTime represents the time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime *unversioned.Time `json:"startTime,omitempty"`
+
+ // CompletionTime represents the time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTime *unversioned.Time `json:"completionTime,omitempty"`
+
+ // Active is the number of actively running pods.
+ Active int32 `json:"active,omitempty"`
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ Succeeded int32 `json:"succeeded,omitempty"`
+
+ // Failed is the number of pods which reached Phase Failed.
+ Failed int32 `json:"failed,omitempty"`
+}
+
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+ // JobComplete means the job has completed its execution.
+ JobComplete JobConditionType = "Complete"
+ // JobFailed means the job has failed its execution.
+ JobFailed JobConditionType = "Failed"
+)
+
+// JobCondition describes current state of a job.
+type JobCondition struct {
+ // Type of job condition, Complete or Failed.
+ Type JobConditionType `json:"type"`
+ // Status of the condition, one of True, False, Unknown.
+ Status api.ConditionStatus `json:"status"`
+ // Last time the condition was checked.
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty"`
+}
+
+// +genclient=true
+
+// ScheduledJob represents the configuration of a single scheduled job.
+type ScheduledJob struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is a structure defining the expected behavior of a job, including the schedule.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ScheduledJobSpec `json:"spec,omitempty"`
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ScheduledJobStatus `json:"status,omitempty"`
+}
+
+// ScheduledJobList is a collection of scheduled jobs.
+type ScheduledJobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of ScheduledJob.
+ Items []ScheduledJob `json:"items"`
+}
+
+// ScheduledJobSpec describes what the job execution will look like and when it will actually run.
+type ScheduledJobSpec struct {
+
+ // Schedule contains the schedule in Cron format; see https://en.wikipedia.org/wiki/Cron.
+ Schedule string `json:"schedule"`
+
+ // Optional deadline in seconds for starting the job if it misses its scheduled
+ // time for any reason. Missed job executions will be counted as failed ones.
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
+
+ // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+ ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+
+ // Suspend flag tells the controller to suspend subsequent executions; it does
+ // not apply to already started executions. Defaults to false.
+ Suspend *bool `json:"suspend"`
+
+ // JobTemplate is the object that describes the job that will be created when
+ // executing a ScheduledJob.
+ JobTemplate JobTemplateSpec `json:"jobTemplate"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrent policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+ // AllowConcurrent allows ScheduledJobs to run concurrently.
+ AllowConcurrent ConcurrencyPolicy = "Allow"
+
+ // ForbidConcurrent forbids concurrent runs, skipping the next run if the previous
+ // one hasn't finished yet.
+ ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+ // ReplaceConcurrent cancels currently running job and replaces it with a new one.
+ // ReplaceConcurrent cancels the currently running job and replaces it with a new one.
+)
+
+// ScheduledJobStatus represents the current state of a ScheduledJob.
+type ScheduledJobStatus struct {
+ // Active holds pointers to currently running jobs.
+ Active []api.ObjectReference `json:"active,omitempty"`
+
+ // LastScheduleTime records the last time the job was successfully scheduled.
+ LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty"`
+}
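As an orientation aid for the internal types declared above, a hypothetical sketch of assembling a ScheduledJob by hand; every name and value here is illustrative and not part of the vendored sources:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/batch"
)

func exampleScheduledJob() batch.ScheduledJob {
	deadline := int64(120) // StartingDeadlineSeconds and Suspend are pointers,
	suspend := false       // so they need addressable values.

	return batch.ScheduledJob{
		ObjectMeta: api.ObjectMeta{Name: "nightly-report"},
		Spec: batch.ScheduledJobSpec{
			Schedule:                "0 2 * * *", // cron format, as documented above
			StartingDeadlineSeconds: &deadline,
			ConcurrencyPolicy:       batch.ForbidConcurrent,
			Suspend:                 &suspend,
			JobTemplate: batch.JobTemplateSpec{
				Spec: batch.JobSpec{
					Template: api.PodTemplateSpec{
						Spec: api.PodSpec{
							RestartPolicy: api.RestartPolicyOnFailure,
							Containers: []api.Container{
								{Name: "report", Image: "example/report:latest"},
							},
						},
					},
				},
			},
		},
	}
}

func main() {
	sj := exampleScheduledJob()
	fmt.Println(sj.Name, sj.Spec.Schedule, sj.Spec.ConcurrencyPolicy)
}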
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go
new file mode 100644
index 0000000..8846af1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions
+ err := scheme.AddConversionFuncs(
+ Convert_batch_JobSpec_To_v1_JobSpec,
+ Convert_v1_JobSpec_To_batch_JobSpec,
+ )
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+
+ err = api.Scheme.AddFieldLabelConversionFunc("batch/v1", "Job",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", "status.successful":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector
+ if in.Selector != nil {
+ out.Selector = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ out.ManualSelector = new(bool)
+ *out.ManualSelector = *in.ManualSelector
+ } else {
+ out.ManualSelector = nil
+ }
+
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ // unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector
+ if in.Selector != nil {
+ out.Selector = new(unversioned.LabelSelector)
+ if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ out.ManualSelector = new(bool)
+ *out.ManualSelector = *in.ManualSelector
+ } else {
+ out.ManualSelector = nil
+ }
+
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
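A hypothetical sketch of round-tripping a JobSpec through the hand-written conversions above by way of the scheme. The install import and the two-argument api.Scheme.Convert call are assumptions about this Kubernetes vintage rather than something this diff introduces:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/batch"
	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"

	// Assumed to register the batch group (types, defaults, and the
	// conversion functions above) into api.Scheme at init time.
	_ "k8s.io/kubernetes/pkg/apis/batch/install"
)

func main() {
	manual := true
	internal := batch.JobSpec{ManualSelector: &manual}

	// api.Scheme supplies the conversion.Scope that the generated helpers
	// expect, so nested fields such as the pod template convert too.
	var versioned batchv1.JobSpec
	if err := api.Scheme.Convert(&internal, &versioned); err != nil {
		panic(err)
	}

	var back batch.JobSpec
	if err := api.Scheme.Convert(&versioned, &back); err != nil {
		panic(err)
	}
	fmt.Println(*versioned.ManualSelector, *back.ManualSelector)
}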
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go
new file mode 100644
index 0000000..88e6d96
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go
@@ -0,0 +1,330 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ api_v1 "k8s.io/kubernetes/pkg/api/v1"
+ batch "k8s.io/kubernetes/pkg/apis/batch"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1_Job_To_batch_Job,
+ Convert_batch_Job_To_v1_Job,
+ Convert_v1_JobCondition_To_batch_JobCondition,
+ Convert_batch_JobCondition_To_v1_JobCondition,
+ Convert_v1_JobList_To_batch_JobList,
+ Convert_batch_JobList_To_v1_JobList,
+ Convert_v1_JobSpec_To_batch_JobSpec,
+ Convert_batch_JobSpec_To_v1_JobSpec,
+ Convert_v1_JobStatus_To_batch_JobStatus,
+ Convert_batch_JobStatus_To_v1_JobStatus,
+ Convert_v1_LabelSelector_To_unversioned_LabelSelector,
+ Convert_unversioned_LabelSelector_To_v1_LabelSelector,
+ Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement,
+ Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error {
+ SetDefaults_Job(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error {
+ return autoConvert_v1_Job_To_batch_Job(in, out, s)
+}
+
+func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error {
+ return autoConvert_batch_Job_To_v1_Job(in, out, s)
+}
+
+func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error {
+ out.Type = batch.JobConditionType(in.Type)
+ out.Status = api.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error {
+ return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s)
+}
+
+func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error {
+ out.Type = JobConditionType(in.Type)
+ out.Status = api_v1.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error {
+ return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s)
+}
+
+func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]batch.Job, len(*in))
+ for i := range *in {
+ if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error {
+ return autoConvert_v1_JobList_To_batch_JobList(in, out, s)
+}
+
+func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Job, len(*in))
+ for i := range *in {
+ if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error {
+ return autoConvert_batch_JobList_To_v1_JobList(in, out, s)
+}
+
+func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.ManualSelector = in.ManualSelector
+ if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.ManualSelector = in.ManualSelector
+ if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]batch.JobCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.StartTime = in.StartTime
+ out.CompletionTime = in.CompletionTime
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error {
+ return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s)
+}
+
+func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(*in))
+ for i := range *in {
+ if err := Convert_batch_JobCondition_To_v1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.StartTime = in.StartTime
+ out.CompletionTime = in.CompletionTime
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error {
+ return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s)
+}
+
+func autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error {
+ out.MatchLabels = in.MatchLabels
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]unversioned.LabelSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error {
+ return autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in, out, s)
+}
+
+func autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
+ out.MatchLabels = in.MatchLabels
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
+ return autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in, out, s)
+}
+
+func autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = unversioned.LabelSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s)
+}
+
+func autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = LabelSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in, out, s)
+}
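
A minimal, hypothetical caller (not part of the vendored tree) that exercises the requirement-level conversion defined above. Passing a nil conversion.Scope is only safe because the autoConvert body shown here never touches the scope, and the "In" operator value is illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func main() {
	in := batchv1.LabelSelectorRequirement{
		Key:      "app",
		Operator: batchv1.LabelSelectorOperator("In"), // illustrative operator value
		Values:   []string{"web"},
	}
	var out unversioned.LabelSelectorRequirement
	// nil scope is acceptable here: the generated body above only copies fields.
	if err := batchv1.Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Key, out.Operator, out.Values) // app In [web]
}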
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go
new file mode 100644
index 0000000..53d4eab
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go
@@ -0,0 +1,197 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ api_v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1_Job,
+ DeepCopy_v1_JobCondition,
+ DeepCopy_v1_JobList,
+ DeepCopy_v1_JobSpec,
+ DeepCopy_v1_JobStatus,
+ DeepCopy_v1_LabelSelector,
+ DeepCopy_v1_LabelSelectorRequirement,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastProbeTime = in.LastProbeTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Job, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_Job(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
+ if in.Parallelism != nil {
+ in, out := in.Parallelism, &out.Parallelism
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Parallelism = nil
+ }
+ if in.Completions != nil {
+ in, out := in.Completions, &out.Completions
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Completions = nil
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ActiveDeadlineSeconds = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ in, out := in.ManualSelector, &out.ManualSelector
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.ManualSelector = nil
+ }
+ if err := api_v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_JobCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.StartTime != nil {
+ in, out := in.StartTime, &out.StartTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.StartTime = nil
+ }
+ if in.CompletionTime != nil {
+ in, out := in.CompletionTime, &out.CompletionTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.CompletionTime = nil
+ }
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func DeepCopy_v1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error {
+ if in.MatchLabels != nil {
+ in, out := in.MatchLabels, &out.MatchLabels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.MatchLabels = nil
+ }
+ if in.MatchExpressions != nil {
+ in, out := in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(in))
+ for i := range in {
+ if err := DeepCopy_v1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ if in.Values != nil {
+ in, out := in.Values, &out.Values
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Values = nil
+ }
+ return nil
+}
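
A small, hypothetical test sketch (not part of the generated file) that checks the value-copy semantics of DeepCopy_v1_LabelSelectorRequirement as written above; the nil Cloner is acceptable only because the function never dereferences it:

package v1

import "testing"

// Hypothetical test: verifies that the generated deep copy above duplicates
// the Values slice rather than aliasing it.
func TestDeepCopyLabelSelectorRequirement(t *testing.T) {
	orig := LabelSelectorRequirement{Key: "tier", Values: []string{"frontend"}}
	var cp LabelSelectorRequirement
	// nil Cloner is fine: DeepCopy_v1_LabelSelectorRequirement never uses it.
	if err := DeepCopy_v1_LabelSelectorRequirement(orig, &cp, nil); err != nil {
		t.Fatal(err)
	}
	cp.Values[0] = "backend"
	if orig.Values[0] != "frontend" {
		t.Fatalf("deep copy aliased the Values slice: %v", orig.Values)
	}
}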
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go
new file mode 100644
index 0000000..571d2c5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_Job,
+ )
+}
+
+func SetDefaults_Job(obj *Job) {
+ // For a non-parallel job, you can leave both `.spec.completions` and
+ // `.spec.parallelism` unset. When both are unset, both are defaulted to 1.
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
+ obj.Spec.Completions = new(int32)
+ *obj.Spec.Completions = 1
+ obj.Spec.Parallelism = new(int32)
+ *obj.Spec.Parallelism = 1
+ }
+ if obj.Spec.Parallelism == nil {
+ obj.Spec.Parallelism = new(int32)
+ *obj.Spec.Parallelism = 1
+ }
+}
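
A short, hypothetical test sketch of the defaulting rule above: when neither field is set, both default to 1; when only completions is set, only parallelism is defaulted:

package v1

import "testing"

// Hypothetical test of SetDefaults_Job as implemented above.
func TestSetDefaultsJob(t *testing.T) {
	job := &Job{} // neither completions nor parallelism set
	SetDefaults_Job(job)
	if *job.Spec.Completions != 1 || *job.Spec.Parallelism != 1 {
		t.Fatalf("expected both completions and parallelism to default to 1")
	}

	two := int32(2)
	job = &Job{Spec: JobSpec{Completions: &two}} // only completions set
	SetDefaults_Job(job)
	if job.Spec.Completions == nil || *job.Spec.Completions != 2 {
		t.Fatalf("expected completions to stay at 2")
	}
	if job.Spec.Parallelism == nil || *job.Spec.Parallelism != 1 {
		t.Fatalf("expected parallelism alone to default to 1")
	}
}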
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go
new file mode 100644
index 0000000..5695b9e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch
+
+package v1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go
new file mode 100644
index 0000000..d3649ef
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go
@@ -0,0 +1,1901 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
+
+ It has these top-level messages:
+ Job
+ JobCondition
+ JobList
+ JobSpec
+ JobStatus
+ LabelSelector
+ LabelSelectorRequirement
+*/
+package v1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *Job) Reset() { *m = Job{} }
+func (m *Job) String() string { return proto.CompactTextString(m) }
+func (*Job) ProtoMessage() {}
+
+func (m *JobCondition) Reset() { *m = JobCondition{} }
+func (m *JobCondition) String() string { return proto.CompactTextString(m) }
+func (*JobCondition) ProtoMessage() {}
+
+func (m *JobList) Reset() { *m = JobList{} }
+func (m *JobList) String() string { return proto.CompactTextString(m) }
+func (*JobList) ProtoMessage() {}
+
+func (m *JobSpec) Reset() { *m = JobSpec{} }
+func (m *JobSpec) String() string { return proto.CompactTextString(m) }
+func (*JobSpec) ProtoMessage() {}
+
+func (m *JobStatus) Reset() { *m = JobStatus{} }
+func (m *JobStatus) String() string { return proto.CompactTextString(m) }
+func (*JobStatus) ProtoMessage() {}
+
+func (m *LabelSelector) Reset() { *m = LabelSelector{} }
+func (m *LabelSelector) String() string { return proto.CompactTextString(m) }
+func (*LabelSelector) ProtoMessage() {}
+
+func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
+func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) }
+func (*LabelSelectorRequirement) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.Job")
+ proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobCondition")
+ proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobList")
+ proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec")
+ proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobStatus")
+ proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelector")
+ proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelectorRequirement")
+}
+func (m *Job) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Job) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *JobCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size()))
+ n4, err := m.LastProbeTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size()))
+ n5, err := m.LastTransitionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ return i, nil
+}
+
+func (m *JobList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n6, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *JobSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Parallelism != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Parallelism))
+ }
+ if m.Completions != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Completions))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds))
+ }
+ if m.Selector != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n7, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ if m.ManualSelector != nil {
+ data[i] = 0x28
+ i++
+ if *m.ManualSelector {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n8, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ return i, nil
+}
+
+func (m *JobStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.StartTime != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size()))
+ n9, err := m.StartTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ if m.CompletionTime != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size()))
+ n10, err := m.CompletionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Active))
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Succeeded))
+ data[i] = 0x30
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Failed))
+ return i, nil
+}
+
+func (m *LabelSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k := range m.MatchLabels {
+ data[i] = 0xa
+ i++
+ v := m.MatchLabels[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, msg := range m.MatchExpressions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Operator)))
+ i += copy(data[i:], m.Operator)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
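
A tiny, hypothetical check of the base-128 varint helper above: 300 needs two bytes, 0xAC 0x02, because the low seven bits (0x2C) are written first with the continuation bit set, followed by the remaining bits (0x02):

package v1

import "testing"

// Hypothetical check of encodeVarintGenerated (not part of the generated file).
func TestEncodeVarintGenerated(t *testing.T) {
	buf := make([]byte, 2)
	n := encodeVarintGenerated(buf, 0, 300)
	// 300 = 0b10_0101100: low seven bits 0x2c with the high bit set, then 0x02.
	if n != 2 || buf[0] != 0xac || buf[1] != 0x02 {
		t.Fatalf("unexpected encoding: n=%d buf=%x", n, buf)
	}
}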
+func (m *Job) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastProbeTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *JobSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Parallelism != nil {
+ n += 1 + sovGenerated(uint64(*m.Parallelism))
+ }
+ if m.Completions != nil {
+ n += 1 + sovGenerated(uint64(*m.Completions))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ManualSelector != nil {
+ n += 2
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.StartTime != nil {
+ l = m.StartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CompletionTime != nil {
+ l = m.CompletionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Active))
+ n += 1 + sovGenerated(uint64(m.Succeeded))
+ n += 1 + sovGenerated(uint64(m.Failed))
+ return n
+}
+
+func (m *LabelSelector) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k, v := range m.MatchLabels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, e := range m.MatchExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelSelectorRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Job) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Job: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = JobConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Job{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Parallelism = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Completions = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveDeadlineSeconds = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ManualSelector = &b
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, JobCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StartTime == nil {
+ m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CompletionTime == nil {
+ m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
+ }
+ m.Active = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Active |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ m.Succeeded = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Succeeded |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType)
+ }
+ m.Failed = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Failed |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.MatchLabels == nil {
+ m.MatchLabels = make(map[string]string)
+ }
+ m.MatchLabels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{})
+ if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelectorRequirement) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = LabelSelectorOperator(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
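
The hand-unrolled loops above (and in skipGenerated) are all the same protobuf base-128 varint decode: each byte contributes its low seven bits, a byte below 0x80 ends the value, and the resulting tag is then split into field number (wire >> 3) and wire type (wire & 0x7). A minimal standalone sketch of that step, separate from the vendored file and with illustrative names:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var (
    	errVarintOverflow = errors.New("varint overflows a 64-bit integer")
    	errTruncated      = errors.New("unexpected end of buffer")
    )

    // decodeVarint mirrors the inline loops in the generated Unmarshal code:
    // accumulate 7 bits per byte until a byte < 0x80 terminates the value.
    func decodeVarint(data []byte, i int) (value uint64, next int, err error) {
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 64 {
    			return 0, i, errVarintOverflow
    		}
    		if i >= len(data) {
    			return 0, i, errTruncated
    		}
    		b := data[i]
    		i++
    		value |= (uint64(b) & 0x7F) << shift
    		if b < 0x80 {
    			return value, i, nil
    		}
    	}
    }

    func main() {
    	// 0x0A is the tag for field 1 with wire type 2 (length-delimited).
    	tag, _, _ := decodeVarint([]byte{0x0A}, 0)
    	fmt.Println(tag>>3, tag&0x7) // 1 2

    	// Multi-byte case: 0xAC 0x02 encodes 300 (0x2C | 0x02<<7).
    	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0)
    	fmt.Println(v, next, err) // 300 2 <nil>
    }
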
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
new file mode 100644
index 0000000..75840ed
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
@@ -0,0 +1,177 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.batch.v1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Job represents the configuration of a single job.
+message Job {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobSpec spec = 2;
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobStatus status = 3;
+}
+
+// JobCondition describes current state of a job.
+message JobCondition {
+ // Type of job condition, Complete or Failed.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition was checked.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+ // (brief) reason for the condition's last transition.
+ optional string reason = 5;
+
+ // Human readable message indicating details about last transition.
+ optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of Job.
+ repeated Job items = 2;
+}
+
+// JobSpec describes how the job execution will look.
+message JobSpec {
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional int32 parallelism = 1;
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional int32 completions = 2;
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; the value must be a positive integer.
+ optional int64 activeDeadlineSeconds = 3;
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional LabelSelector selector = 4;
+
+ // ManualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+ optional bool manualSelector = 5;
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+ // Conditions represent the latest available observations of an object's current state.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ repeated JobCondition conditions = 1;
+
+ // StartTime represents the time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2;
+
+ // CompletionTime represents the time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3;
+
+ // Active is the number of actively running pods.
+ optional int32 active = 4;
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ optional int32 succeeded = 5;
+
+ // Failed is the number of pods which reached Phase Failed.
+ optional int32 failed = 6;
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ map<string, string> matchLabels = 1;
+
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+ // key is the label key that the selector applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ optional string operator = 2;
+
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ repeated string values = 3;
+}
+
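
As the LabelSelector comment above notes, every matchLabels pair is shorthand for a matchExpressions requirement whose operator is In with a single value, and all requirements are ANDed. A small sketch of that equivalence, assuming it sits next to the generated Go types in this package v1 (the helper name is illustrative, and the In operator is written as a literal rather than relying on a named constant):

    // matchLabelsAsExpressions rewrites a selector's matchLabels entries into the
    // equivalent matchExpressions requirements (operator In, single value) and
    // appends any explicit requirements, since the two lists are ANDed together.
    func matchLabelsAsExpressions(sel LabelSelector) []LabelSelectorRequirement {
    	reqs := make([]LabelSelectorRequirement, 0, len(sel.MatchLabels)+len(sel.MatchExpressions))
    	for k, v := range sel.MatchLabels {
    		reqs = append(reqs, LabelSelectorRequirement{
    			Key:      k,
    			Operator: LabelSelectorOperator("In"),
    			Values:   []string{v},
    		})
    	}
    	return append(reqs, sel.MatchExpressions...)
    }
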
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go
new file mode 100644
index 0000000..3631c02
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "batch"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Job{},
+ &JobList{},
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ )
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
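
register.go is what makes the batch/v1 kinds visible to the runtime machinery: AddToScheme registers Job and JobList (plus the shared list/delete options) and hooks up the versioned watch types. A rough usage sketch under the vendored import paths above; the program body and printed output are illustrative, and note that this vintage of AddToScheme returns nothing rather than an error:

    package main

    import (
    	"fmt"

    	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
    	"k8s.io/kubernetes/pkg/runtime"
    )

    func main() {
    	// Build a fresh scheme and register the batch/v1 kinds on it, the same
    	// way kube2msb's vendored client plumbing would before decoding Jobs.
    	scheme := runtime.NewScheme()
    	batchv1.AddToScheme(scheme)

    	// SchemeGroupVersion comes from register.go: group "batch", version "v1".
    	fmt.Println("registered group/version:", batchv1.SchemeGroupVersion)
    }
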
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go
new file mode 100644
index 0000000..a44952c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go
@@ -0,0 +1,3184 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg4_resource.Quantity
+ var v1 pkg1_unversioned.TypeMeta
+ var v2 pkg2_v1.ObjectMeta
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Parallelism != nil
+ yyq2[1] = x.Completions != nil
+ yyq2[2] = x.ActiveDeadlineSeconds != nil
+ yyq2[3] = x.Selector != nil
+ yyq2[4] = x.ManualSelector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Parallelism
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("parallelism"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Parallelism
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Completions
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.Completions
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.ActiveDeadlineSeconds
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(yy14))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.ActiveDeadlineSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ManualSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.ManualSelector
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(yy22))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("manualSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ManualSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy24 := *x.ManualSelector
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(yy24))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy27 := &x.Template
+ yy27.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy29 := &x.Template
+ yy29.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "parallelism":
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "completions":
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "activeDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ case "manualSelector":
+ if r.TryDecodeAsNil() {
+ if x.ManualSelector != nil {
+ x.ManualSelector = nil
+ }
+ } else {
+ if x.ManualSelector == nil {
+ x.ManualSelector = new(bool)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*bool)(x.ManualSelector)) = r.DecodeBool()
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv13 := &x.Template
+ yyv13.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ManualSelector != nil {
+ x.ManualSelector = nil
+ }
+ } else {
+ if x.ManualSelector == nil {
+ x.ManualSelector = new(bool)
+ }
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else {
+ *((*bool)(x.ManualSelector)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv24 := &x.Template
+ yyv24.CodecDecodeSelf(d)
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Conditions) != 0
+ yyq2[1] = x.StartTime != nil
+ yyq2[2] = x.CompletionTime != nil
+ yyq2[3] = x.Active != 0
+ yyq2[4] = x.Succeeded != 0
+ yyq2[5] = x.Failed != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym7 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym8 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym11 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("active"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("succeeded"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv4 := &x.Conditions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv4), d)
+ }
+ }
+ case "startTime":
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ case "completionTime":
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ case "active":
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ case "succeeded":
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ case "failed":
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv14 := &x.Conditions
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv14), d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym17 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym17 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym19 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yysf7 := &x.Status
+ yysf7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yysf8 := &x.Status
+ yysf8.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastProbeTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastProbeTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_v1.ConditionStatus(r.DecodeString())
+ }
+ case "lastProbeTime":
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastProbeTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_v1.ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastProbeTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.MatchLabels) != 0
+ yyq2[1] = len(x.MatchExpressions) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.MatchLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.MatchLabels, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchLabels"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.MatchLabels, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchExpressions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "matchLabels":
+ if r.TryDecodeAsNil() {
+ x.MatchLabels = nil
+ } else {
+ yyv4 := &x.MatchLabels
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv4, false, d)
+ }
+ }
+ case "matchExpressions":
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv6 := &x.MatchExpressions
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchLabels = nil
+ } else {
+ yyv9 := &x.MatchLabels
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv9, false, d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv11 := &x.MatchExpressions
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = len(x.Values) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("values"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = LabelSelectorOperator(r.DecodeString())
+ }
+ case "values":
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv6 := &x.Values
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = LabelSelectorOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv11 := &x.Values
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Job{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Job{}) // var yyz1 Job
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, JobCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LabelSelectorRequirement{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LabelSelectorRequirement, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LabelSelectorRequirement, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LabelSelectorRequirement{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LabelSelectorRequirement{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
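The CodecEncodeSelf / CodecDecodeSelf methods generated above are not called directly; the vendored ugorji codec package (imported as codec1978) invokes them through its Selfer interface whenever one of these types passes through an Encoder or Decoder. A minimal sketch of that round trip, assuming the vendored import paths in this diff resolve on the build path and using illustrative field values:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // imported as codec1978 by the generated file
	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func main() {
	cond := batchv1.JobCondition{
		Type:   batchv1.JobComplete,
		Reason: "AllPodsSucceeded", // illustrative value, not from the source
	}

	// Encoding through a JSON handle dispatches to JobCondition.CodecEncodeSelf.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &codec.JsonHandle{}).Encode(&cond); err != nil {
		panic(err)
	}

	// Decoding dispatches to JobCondition.CodecDecodeSelf, which accepts either
	// a map or an array container, as the generated decoder above shows.
	var out batchv1.JobCondition
	if err := codec.NewDecoderBytes(buf, &codec.JsonHandle{}).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Type, out.Reason)
}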
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go
new file mode 100644
index 0000000..a821e93
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+)
+
+// +genclient=true
+
+// Job represents the configuration of a single job.
+type Job struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// JobList is a collection of jobs.
+type JobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Job.
+ Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// JobSpec describes how the job execution will look.
+type JobSpec struct {
+
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; value must be a positive integer
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+
+ // ManualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+ ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
+}
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+
+ // Conditions represent the latest available observations of an object's current state.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+ // StartTime represents time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // CompletionTime represents time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
+
+ // Active is the number of actively running pods.
+ Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
+
+ // Failed is the number of pods which reached Phase Failed.
+ Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
+}
+
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+ // JobComplete means the job has completed its execution.
+ JobComplete JobConditionType = "Complete"
+ // JobFailed means the job has failed its execution.
+ JobFailed JobConditionType = "Failed"
+)
+
+// JobCondition describes current state of a job.
+type JobCondition struct {
+ // Type of job condition, Complete or Failed.
+ Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // Last time the condition was checked.
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+ // key is the label key that the selector applies to.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+ LabelSelectorOpIn LabelSelectorOperator = "In"
+ LabelSelectorOpNotIn LabelSelectorOperator = "NotIn"
+ LabelSelectorOpExists LabelSelectorOperator = "Exists"
+ LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
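The declarations above are plain data types; a short sketch of how they compose when a client builds a Job may help. The field values below are illustrative assumptions, not taken from the vendored source, and Selector is normally left for the system to populate (see the ManualSelector comment above):

package main

import (
	"fmt"

	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func main() {
	parallelism := int32(2)
	completions := int32(4)

	job := batchv1.Job{
		Spec: batchv1.JobSpec{
			Parallelism: &parallelism, // run at most 2 pods at a time
			Completions: &completions, // the job succeeds after 4 successful pods
			// Shown only to illustrate the selector types; normally omitted.
			Selector: &batchv1.LabelSelector{
				MatchLabels: map[string]string{"app": "demo"},
				MatchExpressions: []batchv1.LabelSelectorRequirement{
					{Key: "tier", Operator: batchv1.LabelSelectorOpIn, Values: []string{"batch"}},
				},
			},
		},
	}
	fmt.Println(*job.Spec.Parallelism, *job.Spec.Completions, len(job.Spec.Selector.MatchExpressions))
}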
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..aa0dbcc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Job = map[string]string{
+ "": "Job represents the configuration of a single job.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Job) SwaggerDoc() map[string]string {
+ return map_Job
+}
+
+var map_JobCondition = map[string]string{
+ "": "JobCondition describes current state of a job.",
+ "type": "Type of job condition, Complete or Failed.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastProbeTime": "Last time the condition was checked.",
+ "lastTransitionTime": "Last time the condition transit from one status to another.",
+ "reason": "(brief) reason for the condition's last transition.",
+ "message": "Human readable message indicating details about last transition.",
+}
+
+func (JobCondition) SwaggerDoc() map[string]string {
+ return map_JobCondition
+}
+
+var map_JobList = map[string]string{
+ "": "JobList is a collection of jobs.",
+ "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of Job.",
+}
+
+func (JobList) SwaggerDoc() map[string]string {
+ return map_JobList
+}
+
+var map_JobSpec = map[string]string{
+ "": "JobSpec describes how the job execution will look like.",
+ "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
+ "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md",
+ "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+}
+
+func (JobSpec) SwaggerDoc() map[string]string {
+ return map_JobSpec
+}
+
+var map_JobStatus = map[string]string{
+ "": "JobStatus represents the current state of a Job.",
+ "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "active": "Active is the number of actively running pods.",
+ "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.",
+ "failed": "Failed is the number of pods which reached Phase Failed.",
+}
+
+func (JobStatus) SwaggerDoc() map[string]string {
+ return map_JobStatus
+}
+
+var map_LabelSelector = map[string]string{
+ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+ "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+ "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+ return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+ "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "key is the label key that the selector applies to.",
+ "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.",
+ "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_LabelSelectorRequirement
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
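Each generated map is keyed by JSON field name, with the empty key holding the type-level description. A minimal sketch of how a caller (go-restful tooling or anything else) might read these maps:

package main

import (
	"fmt"

	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func main() {
	docs := batchv1.JobSpec{}.SwaggerDoc()
	fmt.Println(docs[""])            // type-level description of JobSpec
	fmt.Println(docs["parallelism"]) // per-field description, keyed by JSON name
}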
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go
new file mode 100644
index 0000000..2d5710a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions
+ err := scheme.AddConversionFuncs(
+ Convert_batch_JobSpec_To_v2alpha1_JobSpec,
+ Convert_v2alpha1_JobSpec_To_batch_JobSpec,
+ )
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+
+ // Add field label conversions for kinds whose only selectable fields are ObjectMeta fields.
+ for _, kind := range []string{"Job", "JobTemplate", "ScheduledJob"} {
+ err = api.Scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind,
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", "status.successful":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ }
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
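The closure registered above is what restricts field selectors on batch/v2alpha1 kinds to metadata.name, metadata.namespace, and status.successful. The standalone copy below is hypothetical and exists only to make that behaviour concrete:

package main

import "fmt"

// filterJobFieldLabel is a hypothetical standalone copy of the closure that
// addConversionFuncs registers with the scheme above.
func filterJobFieldLabel(label, value string) (string, string, error) {
	switch label {
	case "metadata.name", "metadata.namespace", "status.successful":
		return label, value, nil
	default:
		return "", "", fmt.Errorf("field label not supported: %s", label)
	}
}

func main() {
	if l, v, err := filterJobFieldLabel("status.successful", "3"); err == nil {
		fmt.Println(l, v) // accepted: one of the three selectable fields
	}
	if _, _, err := filterJobFieldLabel("spec.parallelism", "2"); err != nil {
		fmt.Println(err) // rejected: "field label not supported: spec.parallelism"
	}
}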
+
+func Convert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error {
+ if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+ defaulting.(func(*batch.JobSpec))(in)
+ }
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector
+ if in.Selector != nil {
+ out.Selector = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ out.ManualSelector = new(bool)
+ *out.ManualSelector = *in.ManualSelector
+ } else {
+ out.ManualSelector = nil
+ }
+
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error {
+ if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+ defaulting.(func(*JobSpec))(in)
+ }
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector
+ if in.Selector != nil {
+ out.Selector = new(unversioned.LabelSelector)
+ if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ out.ManualSelector = new(bool)
+ *out.ManualSelector = *in.ManualSelector
+ } else {
+ out.ManualSelector = nil
+ }
+
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
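Both hand-written conversions repeat the same pattern for optional pointer fields such as ManualSelector: allocate a fresh pointer on the output only when the input is non-nil, so the converted object never aliases its source. A minimal sketch of that pattern under a hypothetical helper name:

package v2alpha1 // hypothetical placement, for illustration only

// copyBoolPtr mirrors the ManualSelector handling in the conversions above:
// a nil input stays nil; a non-nil input is deep-copied into a fresh pointer.
func copyBoolPtr(in *bool) *bool {
	if in == nil {
		return nil
	}
	out := new(bool)
	*out = *in
	return out
}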
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go
new file mode 100644
index 0000000..3265269
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go
@@ -0,0 +1,573 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v2alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ batch "k8s.io/kubernetes/pkg/apis/batch"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v2alpha1_Job_To_batch_Job,
+ Convert_batch_Job_To_v2alpha1_Job,
+ Convert_v2alpha1_JobCondition_To_batch_JobCondition,
+ Convert_batch_JobCondition_To_v2alpha1_JobCondition,
+ Convert_v2alpha1_JobList_To_batch_JobList,
+ Convert_batch_JobList_To_v2alpha1_JobList,
+ Convert_v2alpha1_JobSpec_To_batch_JobSpec,
+ Convert_batch_JobSpec_To_v2alpha1_JobSpec,
+ Convert_v2alpha1_JobStatus_To_batch_JobStatus,
+ Convert_batch_JobStatus_To_v2alpha1_JobStatus,
+ Convert_v2alpha1_JobTemplate_To_batch_JobTemplate,
+ Convert_batch_JobTemplate_To_v2alpha1_JobTemplate,
+ Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec,
+ Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec,
+ Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector,
+ Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector,
+ Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement,
+ Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement,
+ Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob,
+ Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob,
+ Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList,
+ Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList,
+ Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec,
+ Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec,
+ Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus,
+ Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error {
+ SetDefaults_Job(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v2alpha1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error {
+ return autoConvert_v2alpha1_Job_To_batch_Job(in, out, s)
+}
+
+func autoConvert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobStatus_To_v2alpha1_JobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error {
+ return autoConvert_batch_Job_To_v2alpha1_Job(in, out, s)
+}
+
+func autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error {
+ out.Type = batch.JobConditionType(in.Type)
+ out.Status = api.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error {
+ return autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in, out, s)
+}
+
+func autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error {
+ out.Type = JobConditionType(in.Type)
+ out.Status = v1.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error {
+ return autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in, out, s)
+}
+
+func autoConvert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]batch.Job, len(*in))
+ for i := range *in {
+ if err := Convert_v2alpha1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error {
+ return autoConvert_v2alpha1_JobList_To_batch_JobList(in, out, s)
+}
+
+func autoConvert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Job, len(*in))
+ for i := range *in {
+ if err := Convert_batch_Job_To_v2alpha1_Job(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error {
+ return autoConvert_batch_JobList_To_v2alpha1_JobList(in, out, s)
+}
+
+func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.ManualSelector = in.ManualSelector
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.ManualSelector = in.ManualSelector
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]batch.JobCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v2alpha1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.StartTime = in.StartTime
+ out.CompletionTime = in.CompletionTime
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func Convert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error {
+ return autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in, out, s)
+}
+
+func autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(*in))
+ for i := range *in {
+ if err := Convert_batch_JobCondition_To_v2alpha1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.StartTime = in.StartTime
+ out.CompletionTime = in.CompletionTime
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func Convert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error {
+ return autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in, out, s)
+}
+
+func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error {
+ return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s)
+}
+
+func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error {
+ return autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s)
+}
+
+func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error {
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s)
+}
+
+func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error {
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error {
+ return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s)
+}
+
+func autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error {
+ out.MatchLabels = in.MatchLabels
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]unversioned.LabelSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error {
+ return autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in, out, s)
+}
+
+func autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
+ out.MatchLabels = in.MatchLabels
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
+ return autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in, out, s)
+}
+
+func autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = unversioned.LabelSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s)
+}
+
+func autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = LabelSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in, out, s)
+}
+
+func autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error {
+ SetDefaults_ScheduledJob(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error {
+ return autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in, out, s)
+}
+
+func autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error {
+ return autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in, out, s)
+}
+
+func autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]batch.ScheduledJob, len(*in))
+ for i := range *in {
+ if err := Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error {
+ return autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in, out, s)
+}
+
+func autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ScheduledJob, len(*in))
+ for i := range *in {
+ if err := Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error {
+ return autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in, out, s)
+}
+
+func autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, out *batch.ScheduledJobSpec, s conversion.Scope) error {
+ out.Schedule = in.Schedule
+ out.StartingDeadlineSeconds = in.StartingDeadlineSeconds
+ out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy)
+ out.Suspend = in.Suspend
+ if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, out *batch.ScheduledJobSpec, s conversion.Scope) error {
+ return autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in, out, s)
+}
+
+func autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error {
+ out.Schedule = in.Schedule
+ out.StartingDeadlineSeconds = in.StartingDeadlineSeconds
+ out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy)
+ out.Suspend = in.Suspend
+ if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error {
+ return autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in, out, s)
+}
+
+func autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error {
+ if in.Active != nil {
+ in, out := &in.Active, &out.Active
+ *out = make([]api.ObjectReference, len(*in))
+ for i := range *in {
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Active = nil
+ }
+ out.LastScheduleTime = in.LastScheduleTime
+ return nil
+}
+
+func Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error {
+ return autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in, out, s)
+}
+
+func autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error {
+ if in.Active != nil {
+ in, out := &in.Active, &out.Active
+ *out = make([]v1.ObjectReference, len(*in))
+ for i := range *in {
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Active = nil
+ }
+ out.LastScheduleTime = in.LastScheduleTime
+ return nil
+}
+
+func Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error {
+ return autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in, out, s)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go
new file mode 100644
index 0000000..baa1b59
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go
@@ -0,0 +1,298 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v2alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v2alpha1_Job,
+ DeepCopy_v2alpha1_JobCondition,
+ DeepCopy_v2alpha1_JobList,
+ DeepCopy_v2alpha1_JobSpec,
+ DeepCopy_v2alpha1_JobStatus,
+ DeepCopy_v2alpha1_JobTemplate,
+ DeepCopy_v2alpha1_JobTemplateSpec,
+ DeepCopy_v2alpha1_LabelSelector,
+ DeepCopy_v2alpha1_LabelSelectorRequirement,
+ DeepCopy_v2alpha1_ScheduledJob,
+ DeepCopy_v2alpha1_ScheduledJobList,
+ DeepCopy_v2alpha1_ScheduledJobSpec,
+ DeepCopy_v2alpha1_ScheduledJobStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v2alpha1_Job(in Job, out *Job, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v2alpha1_JobStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastProbeTime = in.LastProbeTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_v2alpha1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Job, len(in))
+ for i := range in {
+ if err := DeepCopy_v2alpha1_Job(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
+ if in.Parallelism != nil {
+ in, out := in.Parallelism, &out.Parallelism
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Parallelism = nil
+ }
+ if in.Completions != nil {
+ in, out := in.Completions, &out.Completions
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Completions = nil
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ActiveDeadlineSeconds = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v2alpha1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.ManualSelector != nil {
+ in, out := in.ManualSelector, &out.ManualSelector
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.ManualSelector = nil
+ }
+ if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_v2alpha1_JobCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.StartTime != nil {
+ in, out := in.StartTime, &out.StartTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.StartTime = nil
+ }
+ if in.CompletionTime != nil {
+ in, out := in.CompletionTime, &out.CompletionTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.CompletionTime = nil
+ }
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func DeepCopy_v2alpha1_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v2alpha1_JobTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error {
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error {
+ if in.MatchLabels != nil {
+ in, out := in.MatchLabels, &out.MatchLabels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.MatchLabels = nil
+ }
+ if in.MatchExpressions != nil {
+ in, out := in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(in))
+ for i := range in {
+ if err := DeepCopy_v2alpha1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ if in.Values != nil {
+ in, out := in.Values, &out.Values
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Values = nil
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v2alpha1_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v2alpha1_ScheduledJobStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ScheduledJob, len(in))
+ for i := range in {
+ if err := DeepCopy_v2alpha1_ScheduledJob(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error {
+ out.Schedule = in.Schedule
+ if in.StartingDeadlineSeconds != nil {
+ in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.StartingDeadlineSeconds = nil
+ }
+ out.ConcurrencyPolicy = in.ConcurrencyPolicy
+ if in.Suspend != nil {
+ in, out := in.Suspend, &out.Suspend
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.Suspend = nil
+ }
+ if err := DeepCopy_v2alpha1_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v2alpha1_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error {
+ if in.Active != nil {
+ in, out := in.Active, &out.Active
+ *out = make([]v1.ObjectReference, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Active = nil
+ }
+ if in.LastScheduleTime != nil {
+ in, out := in.LastScheduleTime, &out.LastScheduleTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.LastScheduleTime = nil
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go
new file mode 100644
index 0000000..a7d24ce
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_Job,
+ SetDefaults_ScheduledJob,
+ )
+}
+
+func SetDefaults_Job(obj *Job) {
+ // For a non-parallel job, you can leave both `.spec.completions` and
+ // `.spec.parallelism` unset. When both are unset, both are defaulted to 1.
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
+ obj.Spec.Completions = new(int32)
+ *obj.Spec.Completions = 1
+ obj.Spec.Parallelism = new(int32)
+ *obj.Spec.Parallelism = 1
+ }
+ if obj.Spec.Parallelism == nil {
+ obj.Spec.Parallelism = new(int32)
+ *obj.Spec.Parallelism = 1
+ }
+}
+
+func SetDefaults_ScheduledJob(obj *ScheduledJob) {
+ if obj.Spec.ConcurrencyPolicy == "" {
+ obj.Spec.ConcurrencyPolicy = AllowConcurrent
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go
new file mode 100644
index 0000000..76b5d32
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch
+
+package v2alpha1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go
new file mode 100644
index 0000000..a461a47
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go
@@ -0,0 +1,3018 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v2alpha1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto
+
+ It has these top-level messages:
+ Job
+ JobCondition
+ JobList
+ JobSpec
+ JobStatus
+ JobTemplate
+ JobTemplateSpec
+ LabelSelector
+ LabelSelectorRequirement
+ ScheduledJob
+ ScheduledJobList
+ ScheduledJobSpec
+ ScheduledJobStatus
+*/
+package v2alpha1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *Job) Reset() { *m = Job{} }
+func (m *Job) String() string { return proto.CompactTextString(m) }
+func (*Job) ProtoMessage() {}
+
+func (m *JobCondition) Reset() { *m = JobCondition{} }
+func (m *JobCondition) String() string { return proto.CompactTextString(m) }
+func (*JobCondition) ProtoMessage() {}
+
+func (m *JobList) Reset() { *m = JobList{} }
+func (m *JobList) String() string { return proto.CompactTextString(m) }
+func (*JobList) ProtoMessage() {}
+
+func (m *JobSpec) Reset() { *m = JobSpec{} }
+func (m *JobSpec) String() string { return proto.CompactTextString(m) }
+func (*JobSpec) ProtoMessage() {}
+
+func (m *JobStatus) Reset() { *m = JobStatus{} }
+func (m *JobStatus) String() string { return proto.CompactTextString(m) }
+func (*JobStatus) ProtoMessage() {}
+
+func (m *JobTemplate) Reset() { *m = JobTemplate{} }
+func (m *JobTemplate) String() string { return proto.CompactTextString(m) }
+func (*JobTemplate) ProtoMessage() {}
+
+func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} }
+func (m *JobTemplateSpec) String() string { return proto.CompactTextString(m) }
+func (*JobTemplateSpec) ProtoMessage() {}
+
+func (m *LabelSelector) Reset() { *m = LabelSelector{} }
+func (m *LabelSelector) String() string { return proto.CompactTextString(m) }
+func (*LabelSelector) ProtoMessage() {}
+
+func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
+func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) }
+func (*LabelSelectorRequirement) ProtoMessage() {}
+
+func (m *ScheduledJob) Reset() { *m = ScheduledJob{} }
+func (m *ScheduledJob) String() string { return proto.CompactTextString(m) }
+func (*ScheduledJob) ProtoMessage() {}
+
+func (m *ScheduledJobList) Reset() { *m = ScheduledJobList{} }
+func (m *ScheduledJobList) String() string { return proto.CompactTextString(m) }
+func (*ScheduledJobList) ProtoMessage() {}
+
+func (m *ScheduledJobSpec) Reset() { *m = ScheduledJobSpec{} }
+func (m *ScheduledJobSpec) String() string { return proto.CompactTextString(m) }
+func (*ScheduledJobSpec) ProtoMessage() {}
+
+func (m *ScheduledJobStatus) Reset() { *m = ScheduledJobStatus{} }
+func (m *ScheduledJobStatus) String() string { return proto.CompactTextString(m) }
+func (*ScheduledJobStatus) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.Job")
+ proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobCondition")
+ proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobList")
+ proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobSpec")
+ proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobStatus")
+ proto.RegisterType((*JobTemplate)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplate")
+ proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec")
+ proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelector")
+ proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelectorRequirement")
+ proto.RegisterType((*ScheduledJob)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJob")
+ proto.RegisterType((*ScheduledJobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobList")
+ proto.RegisterType((*ScheduledJobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobSpec")
+ proto.RegisterType((*ScheduledJobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobStatus")
+}
+func (m *Job) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Job) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *JobCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size()))
+ n4, err := m.LastProbeTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size()))
+ n5, err := m.LastTransitionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ return i, nil
+}
+
+func (m *JobList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n6, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *JobSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Parallelism != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Parallelism))
+ }
+ if m.Completions != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Completions))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds))
+ }
+ if m.Selector != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n7, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ if m.ManualSelector != nil {
+ data[i] = 0x28
+ i++
+ if *m.ManualSelector {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n8, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ return i, nil
+}
+
+func (m *JobStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.StartTime != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size()))
+ n9, err := m.StartTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ if m.CompletionTime != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size()))
+ n10, err := m.CompletionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Active))
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Succeeded))
+ data[i] = 0x30
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Failed))
+ return i, nil
+}
+
+func (m *JobTemplate) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobTemplate) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n11, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n12, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ return i, nil
+}
+
+func (m *JobTemplateSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n13, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n14, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ return i, nil
+}
+
+func (m *LabelSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k := range m.MatchLabels {
+ data[i] = 0xa
+ i++
+ v := m.MatchLabels[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, msg := range m.MatchExpressions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Operator)))
+ i += copy(data[i:], m.Operator)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ScheduledJob) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScheduledJob) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n15, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n16, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n17, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ return i, nil
+}
+
+func (m *ScheduledJobList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScheduledJobList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n18, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ScheduledJobSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScheduledJobSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Schedule)))
+ i += copy(data[i:], m.Schedule)
+ if m.StartingDeadlineSeconds != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds))
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy)))
+ i += copy(data[i:], m.ConcurrencyPolicy)
+ if m.Suspend != nil {
+ data[i] = 0x20
+ i++
+ if *m.Suspend {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size()))
+ n19, err := m.JobTemplate.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ return i, nil
+}
+
+func (m *ScheduledJobStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScheduledJobStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Active) > 0 {
+ for _, msg := range m.Active {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.LastScheduleTime != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size()))
+ n20, err := m.LastScheduleTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Job) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastProbeTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *JobSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Parallelism != nil {
+ n += 1 + sovGenerated(uint64(*m.Parallelism))
+ }
+ if m.Completions != nil {
+ n += 1 + sovGenerated(uint64(*m.Completions))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ManualSelector != nil {
+ n += 2
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.StartTime != nil {
+ l = m.StartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CompletionTime != nil {
+ l = m.CompletionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Active))
+ n += 1 + sovGenerated(uint64(m.Succeeded))
+ n += 1 + sovGenerated(uint64(m.Failed))
+ return n
+}
+
+func (m *JobTemplate) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobTemplateSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *LabelSelector) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k, v := range m.MatchLabels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, e := range m.MatchExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelSelectorRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ScheduledJob) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ScheduledJobList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ScheduledJobSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Schedule)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.StartingDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds))
+ }
+ l = len(m.ConcurrencyPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Suspend != nil {
+ n += 2
+ }
+ l = m.JobTemplate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ScheduledJobStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Active) > 0 {
+ for _, e := range m.Active {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.LastScheduleTime != nil {
+ l = m.LastScheduleTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Job) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Job: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = JobConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Job{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Parallelism = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Completions = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveDeadlineSeconds = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ManualSelector = &b
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, JobCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StartTime == nil {
+ m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CompletionTime == nil {
+ m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
+ }
+ m.Active = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Active |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ m.Succeeded = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Succeeded |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType)
+ }
+ m.Failed = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Failed |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobTemplate) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobTemplateSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
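+ // A map<string,string> entry is decoded inline as a tiny message: a varint
+ // tag and length-prefixed key string, then a varint tag and value string.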
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.MatchLabels == nil {
+ m.MatchLabels = make(map[string]string)
+ }
+ m.MatchLabels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{})
+ if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelectorRequirement) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = LabelSelectorOperator(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScheduledJob) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScheduledJob: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScheduledJob: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScheduledJobList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScheduledJobList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScheduledJobList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ScheduledJob{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScheduledJobSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScheduledJobSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScheduledJobSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Schedule = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.StartingDeadlineSeconds = &v
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Suspend = &b
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScheduledJobStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScheduledJobStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScheduledJobStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{})
+ if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastScheduleTime == nil {
+ m.LastScheduleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
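+// skipGenerated reports how many bytes the field at the start of data occupies,
+// letting callers step over unknown fields: varints are scanned to their last byte,
+// fixed64/fixed32 advance 8/4 bytes, length-delimited fields advance by their
+// declared length, and groups are skipped recursively up to the end-group marker.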
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
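Every Unmarshal method in the generated file above repeats the same hand-rolled protobuf varint loop (seven payload bits per byte, with the high bit as a continuation flag) and then splits the decoded key into a field number and a wire type. The standalone Go sketch below is not part of the vendored file; it only restates that pattern in isolation so the loops above are easier to follow.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop used by the generated Unmarshal methods:
// each byte contributes its low seven bits, and a byte below 0x80 ends the value.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return value, n, nil
}

func main() {
	// 0x96 0x01 is the varint encoding of 150.
	v, n, err := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // 150 2 <nil>

	// A key is itself a varint: field number in the high bits, wire type in the low three.
	key, _, _ := decodeVarint([]byte{0x1a}) // 0x1a = field 3, wire type 2 (length-delimited)
	fmt.Println(key>>3, key&0x7)            // 3 2
}

Wire type 2 is the length-delimited form used for the strings and embedded messages decoded above; wire type 0 covers the bare integers such as parallelism and completions.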
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto
new file mode 100644
index 0000000..41e2001
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto
@@ -0,0 +1,254 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.batch.v2alpha1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v2alpha1";
+
+// Job represents the configuration of a single job.
+message Job {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobSpec spec = 2;
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobStatus status = 3;
+}
+
+// JobCondition describes current state of a job.
+message JobCondition {
+ // Type of job condition, Complete or Failed.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition was checked.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+ // (brief) reason for the condition's last transition.
+ optional string reason = 5;
+
+ // Human readable message indicating details about last transition.
+ optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of Job.
+ repeated Job items = 2;
+}
+
+// JobSpec describes what the job execution will look like.
+message JobSpec {
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional int32 parallelism = 1;
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional int32 completions = 2;
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; value must be positive integer
+ optional int64 activeDeadlineSeconds = 3;
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional LabelSelector selector = 4;
+
+ // ManualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+ optional bool manualSelector = 5;
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+ // Conditions represent the latest available observations of an object's current state.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ repeated JobCondition conditions = 1;
+
+ // StartTime represents the time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2;
+
+ // CompletionTime represents the time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3;
+
+ // Active is the number of actively running pods.
+ optional int32 active = 4;
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ optional int32 succeeded = 5;
+
+ // Failed is the number of pods which reached Phase Failed.
+ optional int32 failed = 6;
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+message JobTemplate {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Template defines jobs that will be created from this template
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobTemplateSpec template = 2;
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template
+message JobTemplateSpec {
+ // Standard object's metadata of the jobs created from this template.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobSpec spec = 2;
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ map<string, string> matchLabels = 1;
+
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+ // key is the label key that the selector applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ optional string operator = 2;
+
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ repeated string values = 3;
+}
+
+// ScheduledJob represents the configuration of a single scheduled job.
+message ScheduledJob {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec is a structure defining the expected behavior of a job, including the schedule.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ScheduledJobSpec spec = 2;
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ScheduledJobStatus status = 3;
+}
+
+// ScheduledJobList is a collection of scheduled jobs.
+message ScheduledJobList {
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of ScheduledJob.
+ repeated ScheduledJob items = 2;
+}
+
+// ScheduledJobSpec describes what the job execution will look like and when it will actually run.
+message ScheduledJobSpec {
+ // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ optional string schedule = 1;
+
+ // Optional deadline in seconds for starting the job if it misses scheduled
+ // time for any reason. Missed job executions will be counted as failed ones.
+ optional int64 startingDeadlineSeconds = 2;
+
+ // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+ optional string concurrencyPolicy = 3;
+
+ // Suspend flag tells the controller to suspend subsequent executions; it does
+ // not apply to already started executions. Defaults to false.
+ optional bool suspend = 4;
+
+ // JobTemplate is the object that describes the job that will be created when
+ // executing a ScheduledJob.
+ optional JobTemplateSpec jobTemplate = 5;
+}
+
+// ScheduledJobStatus represents the current state of a ScheduledJob.
+message ScheduledJobStatus {
+ // Active holds pointers to currently running jobs.
+ repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1;
+
+ // LastScheduleTime keeps information of when the job was last successfully scheduled.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScheduleTime = 4;
+}
+
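For orientation, here is a small Go sketch (not part of the vendored sources) that constructs the v2alpha1 types whose decoders and proto schema appear above. The import path and the int32Ptr helper are assumptions for illustration; the field names (MatchLabels, MatchExpressions, Parallelism, Completions, Selector) come from the generated Unmarshal code. It also shows the matchLabels/matchExpressions equivalence described in the LabelSelector comment.

package main

import (
	"fmt"

	batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" // assumed import path (vendored here)
)

// int32Ptr is a small helper for the optional int32 fields, which are pointers.
func int32Ptr(i int32) *int32 { return &i }

func main() {
	// A matchLabels pair {"app": "demo"} is shorthand for a single "In"
	// requirement with one value; the two selectors below are equivalent.
	short := batchv2alpha1.LabelSelector{
		MatchLabels: map[string]string{"app": "demo"},
	}
	long := batchv2alpha1.LabelSelector{
		MatchExpressions: []batchv2alpha1.LabelSelectorRequirement{{
			Key:      "app",
			Operator: batchv2alpha1.LabelSelectorOperator("In"),
			Values:   []string{"demo"},
		}},
	}

	spec := batchv2alpha1.JobSpec{
		Parallelism: int32Ptr(2),
		Completions: int32Ptr(4),
		Selector:    &short,
	}
	fmt.Println(*spec.Parallelism, len(long.MatchExpressions)) // 2 1
}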
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go
new file mode 100644
index 0000000..54f0d62
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "batch"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v2alpha1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Job{},
+ &JobList{},
+ &JobTemplate{},
+ &ScheduledJob{},
+ &ScheduledJobList{},
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ )
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
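A minimal usage sketch for the registration function above, assuming the vendored import paths resolve as written: AddToScheme wires the batch/v2alpha1 kinds into a runtime.Scheme so they can be encoded, decoded, and watched.

package main

import (
	batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" // assumed import path (vendored here)
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Registers Job, JobList, JobTemplate, ScheduledJob, ScheduledJobList,
	// plus the shared ListOptions/DeleteOptions, under batch/v2alpha1.
	scheme := runtime.NewScheme()
	batchv2alpha1.AddToScheme(scheme)
	_ = scheme
}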
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go
new file mode 100644
index 0000000..bd48550
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go
@@ -0,0 +1,5310 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v2alpha1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg4_resource.Quantity
+ var v1 pkg1_unversioned.TypeMeta
+ var v2 pkg2_v1.ObjectMeta
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
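+// Array form: decode the fields positionally in declaration order, stop early if the
+// array ends, and drain any trailing elements so the stream stays aligned.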
+func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
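+// Generated encode path for JobList: with the handle's StructToArray option the struct
+// is written as a fixed-length array; otherwise it is written as a map sized from the
+// always-present "items" field plus whichever optional fields (metadata, kind,
+// apiVersion) are flagged in yyq2, so empty optional fields are omitted.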
+func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Template
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Template
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = JobTemplateSpec{}
+ } else {
+ yyv5 := &x.Template
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = JobTemplateSpec{}
+ } else {
+ yyv10 := &x.Template
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv7 := &x.ObjectMeta
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv8 := &x.Spec
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
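+// JobSpec's optional fields are pointers; a nil pointer is left out of the map
+// encoding entirely (its yyq2 flag stays false) and written as nil in the array
+// encoding, while the required "template" field is always emitted.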
+func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Parallelism != nil
+ yyq2[1] = x.Completions != nil
+ yyq2[2] = x.ActiveDeadlineSeconds != nil
+ yyq2[3] = x.Selector != nil
+ yyq2[4] = x.ManualSelector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Parallelism
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("parallelism"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Parallelism
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Completions
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.Completions
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.ActiveDeadlineSeconds
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(yy14))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.ActiveDeadlineSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ManualSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.ManualSelector
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(yy22))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("manualSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ManualSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy24 := *x.ManualSelector
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(yy24))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy27 := &x.Template
+ yy27.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy29 := &x.Template
+ yy29.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
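+// When decoding JobSpec, pointer fields are allocated lazily: a nil wire value clears
+// the pointer, anything else allocates the target (int32/int64/bool/LabelSelector)
+// before decoding into it.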
+func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "parallelism":
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "completions":
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "activeDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ case "manualSelector":
+ if r.TryDecodeAsNil() {
+ if x.ManualSelector != nil {
+ x.ManualSelector = nil
+ }
+ } else {
+ if x.ManualSelector == nil {
+ x.ManualSelector = new(bool)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*bool)(x.ManualSelector)) = r.DecodeBool()
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv13 := &x.Template
+ yyv13.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ManualSelector != nil {
+ x.ManualSelector = nil
+ }
+ } else {
+ if x.ManualSelector == nil {
+ x.ManualSelector = new(bool)
+ }
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else {
+ *((*bool)(x.ManualSelector)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv24 := &x.Template
+ yyv24.CodecDecodeSelf(d)
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Conditions) != 0
+ yyq2[1] = x.StartTime != nil
+ yyq2[2] = x.CompletionTime != nil
+ yyq2[3] = x.Active != 0
+ yyq2[4] = x.Succeeded != 0
+ yyq2[5] = x.Failed != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym7 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym8 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym11 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("active"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("succeeded"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv4 := &x.Conditions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv4), d)
+ }
+ }
+ case "startTime":
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ case "completionTime":
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ case "active":
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ case "succeeded":
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ case "failed":
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv14 := &x.Conditions
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv14), d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym17 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym17 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym19 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
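+// JobConditionType is a defined string type, so its codec methods simply pass the
+// value through as a UTF-8 string (or hand it to a registered extension).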
+func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yysf7 := &x.Status
+ yysf7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yysf8 := &x.Status
+ yysf8.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastProbeTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastProbeTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_v1.ConditionStatus(r.DecodeString())
+ }
+ case "lastProbeTime":
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastProbeTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_v1.ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastProbeTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScheduledJob) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJob) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ScheduledJobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ScheduledJobStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ScheduledJobSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ScheduledJobStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScheduledJobList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJobList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceScheduledJob((*[]ScheduledJob)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceScheduledJob((*[]ScheduledJob)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.StartingDeadlineSeconds != nil
+ yyq2[2] = x.ConcurrencyPolicy != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Schedule))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("schedule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Schedule))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.StartingDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy7 := *x.StartingDeadlineSeconds
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(yy7))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartingDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.StartingDeadlineSeconds
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ x.ConcurrencyPolicy.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.ConcurrencyPolicy.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Suspend == nil {
+ r.EncodeNil()
+ } else {
+ yy15 := *x.Suspend
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeBool(bool(yy15))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("suspend"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Suspend == nil {
+ r.EncodeNil()
+ } else {
+ yy17 := *x.Suspend
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeBool(bool(yy17))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy20 := &x.JobTemplate
+ yy20.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("jobTemplate"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy22 := &x.JobTemplate
+ yy22.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJobSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "schedule":
+ if r.TryDecodeAsNil() {
+ x.Schedule = ""
+ } else {
+ x.Schedule = string(r.DecodeString())
+ }
+ case "startingDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.StartingDeadlineSeconds != nil {
+ x.StartingDeadlineSeconds = nil
+ }
+ } else {
+ if x.StartingDeadlineSeconds == nil {
+ x.StartingDeadlineSeconds = new(int64)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "concurrencyPolicy":
+ if r.TryDecodeAsNil() {
+ x.ConcurrencyPolicy = ""
+ } else {
+ x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString())
+ }
+ case "suspend":
+ if r.TryDecodeAsNil() {
+ if x.Suspend != nil {
+ x.Suspend = nil
+ }
+ } else {
+ if x.Suspend == nil {
+ x.Suspend = new(bool)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*bool)(x.Suspend)) = r.DecodeBool()
+ }
+ }
+ case "jobTemplate":
+ if r.TryDecodeAsNil() {
+ x.JobTemplate = JobTemplateSpec{}
+ } else {
+ yyv10 := &x.JobTemplate
+ yyv10.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Schedule = ""
+ } else {
+ x.Schedule = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartingDeadlineSeconds != nil {
+ x.StartingDeadlineSeconds = nil
+ }
+ } else {
+ if x.StartingDeadlineSeconds == nil {
+ x.StartingDeadlineSeconds = new(int64)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrencyPolicy = ""
+ } else {
+ x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Suspend != nil {
+ x.Suspend = nil
+ }
+ } else {
+ if x.Suspend == nil {
+ x.Suspend = new(bool)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*bool)(x.Suspend)) = r.DecodeBool()
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.JobTemplate = JobTemplateSpec{}
+ } else {
+ yyv18 := &x.JobTemplate
+ yyv18.CodecDecodeSelf(d)
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ScheduledJobStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Active) != 0
+ yyq2[1] = x.LastScheduleTime != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Active == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("active"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Active == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.LastScheduleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) {
+ } else if yym7 {
+ z.EncBinaryMarshal(x.LastScheduleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScheduleTime)
+ } else {
+ z.EncFallback(x.LastScheduleTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LastScheduleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) {
+ } else if yym8 {
+ z.EncBinaryMarshal(x.LastScheduleTime)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScheduleTime)
+ } else {
+ z.EncFallback(x.LastScheduleTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScheduledJobStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScheduledJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "active":
+ if r.TryDecodeAsNil() {
+ x.Active = nil
+ } else {
+ yyv4 := &x.Active
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv4), d)
+ }
+ }
+ case "lastScheduleTime":
+ if r.TryDecodeAsNil() {
+ if x.LastScheduleTime != nil {
+ x.LastScheduleTime = nil
+ }
+ } else {
+ if x.LastScheduleTime == nil {
+ x.LastScheduleTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.LastScheduleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScheduleTime)
+ } else {
+ z.DecFallback(x.LastScheduleTime, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScheduledJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Active = nil
+ } else {
+ yyv9 := &x.Active
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LastScheduleTime != nil {
+ x.LastScheduleTime = nil
+ }
+ } else {
+ if x.LastScheduleTime == nil {
+ x.LastScheduleTime = new(pkg1_unversioned.Time)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) {
+ } else if yym12 {
+ z.DecBinaryUnmarshal(x.LastScheduleTime)
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScheduleTime)
+ } else {
+ z.DecFallback(x.LastScheduleTime, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.MatchLabels) != 0
+ yyq2[1] = len(x.MatchExpressions) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.MatchLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.MatchLabels, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchLabels"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.MatchLabels, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchExpressions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "matchLabels":
+ if r.TryDecodeAsNil() {
+ x.MatchLabels = nil
+ } else {
+ yyv4 := &x.MatchLabels
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv4, false, d)
+ }
+ }
+ case "matchExpressions":
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv6 := &x.MatchExpressions
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchLabels = nil
+ } else {
+ yyv9 := &x.MatchLabels
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv9, false, d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv11 := &x.MatchExpressions
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = len(x.Values) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("values"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = LabelSelectorOperator(r.DecodeString())
+ }
+ case "values":
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv6 := &x.Values
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = LabelSelectorOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv11 := &x.Values
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Job{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Job{}) // var yyz1 Job
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, JobCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceScheduledJob(v []ScheduledJob, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ScheduledJob{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1024)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ScheduledJob, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ScheduledJob, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ScheduledJob{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ScheduledJob{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ScheduledJob{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ScheduledJob{}) // var yyz1 ScheduledJob
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ScheduledJob{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ScheduledJob{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg2_v1.ObjectReference, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg2_v1.ObjectReference, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg2_v1.ObjectReference{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg2_v1.ObjectReference, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg2_v1.ObjectReference, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_v1.ObjectReference{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, pkg2_v1.ObjectReference{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_v1.ObjectReference{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, pkg2_v1.ObjectReference{}) // var yyz1 pkg2_v1.ObjectReference
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = pkg2_v1.ObjectReference{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg2_v1.ObjectReference{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LabelSelectorRequirement{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LabelSelectorRequirement, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LabelSelectorRequirement, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LabelSelectorRequirement{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LabelSelectorRequirement{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
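
The generated CodecEncodeSelf/CodecDecodeSelf methods above are never called directly; they satisfy go-codec's Selfer interface, so an Encoder or Decoder built on any codec.Handle dispatches to them automatically. A minimal round-trip sketch, assuming the vendored github.com/ugorji/go/codec package and the ScheduledJob type from this file (the handle choice, object name, and schedule string are illustrative only):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	v2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

func main() {
	// A JsonHandle exercises the IsJSONHandle()/EncJSONMarshal branches seen in
	// the generated code; a binary handle (Binc, Msgpack) would take the
	// EncBinaryMarshal/DecBinaryUnmarshal paths instead.
	var h codec.Handle = &codec.JsonHandle{}

	in := v2alpha1.ScheduledJob{}
	in.ObjectMeta.Name = "example"   // illustrative name
	in.Spec.Schedule = "*/5 * * * *" // illustrative cron spec

	// Encoding: the encoder notices that *ScheduledJob implements codec.Selfer
	// and calls the generated CodecEncodeSelf.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, h).Encode(&in); err != nil {
		panic(err)
	}

	// Decoding: symmetric dispatch to the generated CodecDecodeSelf.
	var out v2alpha1.ScheduledJob
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Schedule)
}

The import paths are written as they appear in this vendor tree; from inside src/kube2msb they should resolve through the vendor directory added by this change.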
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go
new file mode 100644
index 0000000..0402df2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go
@@ -0,0 +1,283 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+)
+
+// Job represents the configuration of a single job.
+type Job struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// JobList is a collection of jobs.
+type JobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Job.
+ Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+type JobTemplate struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Template defines jobs that will be created from this template
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template
+type JobTemplateSpec struct {
+ // Standard object's metadata of the jobs created from this template.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// JobSpec describes how the job execution will look.
+type JobSpec struct {
+
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; value must be a positive integer
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+
+ // ManualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+ ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
+}
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+
+ // Conditions represent the latest available observations of an object's current state.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+ // StartTime represents time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // CompletionTime represents time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
+
+ // Active is the number of actively running pods.
+ Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
+
+ // Failed is the number of pods which reached Phase Failed.
+ Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
+}
+
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+ // JobComplete means the job has completed its execution.
+ JobComplete JobConditionType = "Complete"
+ // JobFailed means the job has failed its execution.
+ JobFailed JobConditionType = "Failed"
+)
+
+// JobCondition describes current state of a job.
+type JobCondition struct {
+ // Type of job condition, Complete or Failed.
+ Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // Last time the condition was checked.
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// ScheduledJob represents the configuration of a single scheduled job.
+type ScheduledJob struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec is a structure defining the expected behavior of a job, including the schedule.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ScheduledJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ScheduledJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ScheduledJobList is a collection of scheduled jobs.
+type ScheduledJobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of ScheduledJob.
+ Items []ScheduledJob `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ScheduledJobSpec describes how the job execution will look and when it will actually run.
+type ScheduledJobSpec struct {
+
+ // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"`
+
+ // Optional deadline in seconds for starting the job if it misses its scheduled
+ // time for any reason. Missed job executions will be counted as failed ones.
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
+
+ // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+ ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
+
+ // Suspend flag tells the controller to suspend subsequent executions; it does
+ // not apply to already started executions. Defaults to false.
+ Suspend *bool `json:"suspend" protobuf:"varint,4,opt,name=suspend"`
+
+ // JobTemplate is the object that describes the job that will be created when
+ // executing a ScheduledJob.
+ JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrent policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+ // AllowConcurrent allows ScheduledJobs to run concurrently.
+ AllowConcurrent ConcurrencyPolicy = "Allow"
+
+ // ForbidConcurrent forbids concurrent runs, skipping the next run if the
+ // previous one hasn't finished yet.
+ ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+ // ReplaceConcurrent cancels currently running job and replaces it with a new one.
+ ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
+
+// ScheduledJobStatus represents the current state of a Job.
+type ScheduledJobStatus struct {
+ // Active holds pointers to currently running jobs.
+ Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"`
+
+ // LastScheduleTime is the last time the job was successfully scheduled.
+ LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"`
+}
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+ // key is the label key that the selector applies to.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+ LabelSelectorOpIn LabelSelectorOperator = "In"
+ LabelSelectorOpNotIn LabelSelectorOperator = "NotIn"
+ LabelSelectorOpExists LabelSelectorOperator = "Exists"
+ LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
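
Putting the types above together, a caller would describe a v2alpha1 ScheduledJob roughly as follows; the import aliases, label key, and cron expression are illustrative rather than taken from kube2msb itself:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

func main() {
	parallelism := int32(1)
	sj := batch.ScheduledJob{
		Spec: batch.ScheduledJobSpec{
			Schedule:          "*/5 * * * *",          // run every five minutes
			ConcurrencyPolicy: batch.ForbidConcurrent, // skip a run if the previous one is still active
			JobTemplate: batch.JobTemplateSpec{
				Spec: batch.JobSpec{
					Parallelism: &parallelism,
					Selector: &batch.LabelSelector{
						MatchExpressions: []batch.LabelSelectorRequirement{
							{Key: "app", Operator: batch.LabelSelectorOpIn, Values: []string{"batch-worker"}},
						},
					},
					Template: v1.PodTemplateSpec{}, // pod spec elided for brevity
				},
			},
		},
	}
	fmt.Println(sj.Spec.Schedule)
}
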
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..17d4331
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Job = map[string]string{
+ "": "Job represents the configuration of a single job.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Job) SwaggerDoc() map[string]string {
+ return map_Job
+}
+
+var map_JobCondition = map[string]string{
+ "": "JobCondition describes current state of a job.",
+ "type": "Type of job condition, Complete or Failed.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastProbeTime": "Last time the condition was checked.",
+ "lastTransitionTime": "Last time the condition transit from one status to another.",
+ "reason": "(brief) reason for the condition's last transition.",
+ "message": "Human readable message indicating details about last transition.",
+}
+
+func (JobCondition) SwaggerDoc() map[string]string {
+ return map_JobCondition
+}
+
+var map_JobList = map[string]string{
+ "": "JobList is a collection of jobs.",
+ "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of Job.",
+}
+
+func (JobList) SwaggerDoc() map[string]string {
+ return map_JobList
+}
+
+var map_JobSpec = map[string]string{
+ "": "JobSpec describes how the job execution will look like.",
+ "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
+ "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md",
+ "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+}
+
+func (JobSpec) SwaggerDoc() map[string]string {
+ return map_JobSpec
+}
+
+var map_JobStatus = map[string]string{
+ "": "JobStatus represents the current state of a Job.",
+ "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "active": "Active is the number of actively running pods.",
+ "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.",
+ "failed": "Failed is the number of pods which reached Phase Failed.",
+}
+
+func (JobStatus) SwaggerDoc() map[string]string {
+ return map_JobStatus
+}
+
+var map_JobTemplate = map[string]string{
+ "": "JobTemplate describes a template for creating copies of a predefined pod.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "template": "Template defines jobs that will be created from this template http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplate) SwaggerDoc() map[string]string {
+ return map_JobTemplate
+}
+
+var map_JobTemplateSpec = map[string]string{
+ "": "JobTemplateSpec describes the data a Job should have when created from a template",
+ "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplateSpec) SwaggerDoc() map[string]string {
+ return map_JobTemplateSpec
+}
+
+var map_LabelSelector = map[string]string{
+ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+ "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+ "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+ return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+ "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "key is the label key that the selector applies to.",
+ "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.",
+ "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_LabelSelectorRequirement
+}
+
+var map_ScheduledJob = map[string]string{
+ "": "ScheduledJob represents the configuration of a single scheduled job.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (ScheduledJob) SwaggerDoc() map[string]string {
+ return map_ScheduledJob
+}
+
+var map_ScheduledJobList = map[string]string{
+ "": "ScheduledJobList is a collection of scheduled jobs.",
+ "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of ScheduledJob.",
+}
+
+func (ScheduledJobList) SwaggerDoc() map[string]string {
+ return map_ScheduledJobList
+}
+
+var map_ScheduledJobSpec = map[string]string{
+ "": "ScheduledJobSpec describes how the job execution will look like and when it will actually run.",
+ "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
+ "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.",
+ "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
+ "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
+ "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a ScheduledJob.",
+}
+
+func (ScheduledJobSpec) SwaggerDoc() map[string]string {
+ return map_ScheduledJobSpec
+}
+
+var map_ScheduledJobStatus = map[string]string{
+ "": "ScheduledJobStatus represents the current state of a Job.",
+ "active": "Active holds pointers to currently running jobs.",
+ "lastScheduleTime": "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.",
+}
+
+func (ScheduledJobStatus) SwaggerDoc() map[string]string {
+ return map_ScheduledJobStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
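
Each SwaggerDoc() map keys field documentation by JSON field name, with the empty key holding the type-level description; go-restful consumes these maps when building API docs. A small sketch of reading one directly:

package main

import (
	"fmt"

	batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

func main() {
	docs := batch.JobSpec{}.SwaggerDoc()
	fmt.Println("JobSpec:", docs[""]) // type-level description
	for field, doc := range docs {
		if field != "" {
			fmt.Printf("  %s: %s\n", field, doc)
		}
	}
}
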
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/deep_copy_generated.go
new file mode 100644
index 0000000..07b1b2d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/deep_copy_generated.go
@@ -0,0 +1,120 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package certificates
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_certificates_CertificateSigningRequest,
+ DeepCopy_certificates_CertificateSigningRequestCondition,
+ DeepCopy_certificates_CertificateSigningRequestList,
+ DeepCopy_certificates_CertificateSigningRequestSpec,
+ DeepCopy_certificates_CertificateSigningRequestStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_certificates_CertificateSigningRequest(in CertificateSigningRequest, out *CertificateSigningRequest, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_certificates_CertificateSigningRequestSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_certificates_CertificateSigningRequestStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_certificates_CertificateSigningRequestCondition(in CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.LastUpdateTime = in.LastUpdateTime.DeepCopy()
+ return nil
+}
+
+func DeepCopy_certificates_CertificateSigningRequestList(in CertificateSigningRequestList, out *CertificateSigningRequestList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]CertificateSigningRequest, len(in))
+ for i := range in {
+ if err := DeepCopy_certificates_CertificateSigningRequest(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_certificates_CertificateSigningRequestSpec(in CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, c *conversion.Cloner) error {
+ if in.Request != nil {
+ in, out := in.Request, &out.Request
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Request = nil
+ }
+ out.Username = in.Username
+ out.UID = in.UID
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Groups = nil
+ }
+ return nil
+}
+
+func DeepCopy_certificates_CertificateSigningRequestStatus(in CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, c *conversion.Cloner) error {
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]CertificateSigningRequestCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_certificates_CertificateSigningRequestCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.Certificate != nil {
+ in, out := in.Certificate, &out.Certificate
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Certificate = nil
+ }
+ return nil
+}
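
The generated deep-copy functions are registered with api.Scheme in init(), but they can also be invoked directly. A sketch of copying a spec so its byte slices no longer alias the original; conversion.NewCloner() is an assumption about the vendored conversion package rather than something shown in this diff (the Spec copy above never dereferences the cloner, so the choice is largely cosmetic):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/certificates"
	"k8s.io/kubernetes/pkg/conversion"
)

func main() {
	in := certificates.CertificateSigningRequestSpec{
		Request:  []byte("-----BEGIN CERTIFICATE REQUEST-----"),
		Username: "node-bootstrapper",
		Groups:   []string{"system:nodes"},
	}

	var out certificates.CertificateSigningRequestSpec
	cloner := conversion.NewCloner() // assumed constructor; unused by the generated Spec copy
	if err := certificates.DeepCopy_certificates_CertificateSigningRequestSpec(in, &out, cloner); err != nil {
		panic(err)
	}

	out.Request[0] = 'X'                                         // mutate the copy...
	fmt.Println(string(in.Request[:5]), string(out.Request[:5])) // ...the original is untouched
}
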
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go
new file mode 100644
index 0000000..d2897cb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package certificates
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go
new file mode 100644
index 0000000..cf54da4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go
@@ -0,0 +1,131 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the certificates API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/certificates"
+ "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/certificates"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", certificates.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString(
+ "CertificateSigningRequest",
+ )
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version
+// string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1alpha1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(certificates.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ certificates.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1alpha1.SchemeGroupVersion:
+ v1alpha1.AddToScheme(api.Scheme)
+ }
+ }
+}
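
Nothing in this package is meant to be called directly; consumers blank-import it so that the init() above registers and enables the certificates group. A minimal sketch, reusing the same registered.IsEnabledVersion helper that addVersionsToScheme checks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apimachinery/registered"
	"k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"

	// Blank import for side effects: init() registers and enables the group.
	_ "k8s.io/kubernetes/pkg/apis/certificates/install"
)

func main() {
	fmt.Println("certificates/v1alpha1 enabled:",
		registered.IsEnabledVersion(v1alpha1.SchemeGroupVersion))
}
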
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go
new file mode 100644
index 0000000..d232b3c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificates
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "certificates"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CertificateSigningRequest{},
+ &CertificateSigningRequestList{},
+ &api.ListOptions{},
+ &api.DeleteOptions{},
+ )
+}
+
+func (obj *CertificateSigningRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *CertificateSigningRequestList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
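
Kind and Resource simply qualify bare names with this group, which is how callers build GroupKind and GroupResource values for the certificates API. For example:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/certificates"
)

func main() {
	gk := certificates.Kind("CertificateSigningRequest")
	gr := certificates.Resource("certificatesigningrequests")
	fmt.Println(gk.Group, gk.Kind)     // "certificates", "CertificateSigningRequest"
	fmt.Println(gr.Group, gr.Resource) // "certificates", "certificatesigningrequests"
}
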
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go
new file mode 100644
index 0000000..21798df
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.generated.go
@@ -0,0 +1,1963 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package certificates
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_api "k8s.io/kubernetes/pkg/api"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_api.ObjectMeta
+ var v1 pkg1_unversioned.TypeMeta
+ var v2 pkg3_types.UID
+ var v3 time.Time
+ _, _, _, _ = v0, v1, v2, v3
+ }
+}
+
+func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = CertificateSigningRequestSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = CertificateSigningRequestStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = CertificateSigningRequestSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = CertificateSigningRequestStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Username != ""
+ yyq2[2] = x.UID != ""
+ yyq2[3] = len(x.Groups) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Request == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("request"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Request == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("username"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("groups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "request":
+ if r.TryDecodeAsNil() {
+ x.Request = nil
+ } else {
+ yyv4 := &x.Request
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false)
+ }
+ }
+ case "username":
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ case "groups":
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv8 := &x.Groups
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Request = nil
+ } else {
+ yyv11 := &x.Request
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv15 := &x.Groups
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv15, false, d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Conditions) != 0
+ yyq2[1] = len(x.Certificate) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Certificate == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("certificate"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Certificate == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv4 := &x.Conditions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv4), d)
+ }
+ }
+ case "certificate":
+ if r.TryDecodeAsNil() {
+ x.Certificate = nil
+ } else {
+ yyv6 := &x.Certificate
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv9 := &x.Conditions
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Certificate = nil
+ } else {
+ yyv11 := &x.Certificate
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Reason != ""
+ yyq2[2] = x.Message != ""
+ yyq2[3] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy13 := &x.LastUpdateTime
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy13) {
+ } else if yym14 {
+ z.EncBinaryMarshal(yy13)
+ } else if !yym14 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy13)
+ } else {
+ z.EncFallback(yy13)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy15 := &x.LastUpdateTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = RequestConditionType(r.DecodeString())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "lastUpdateTime":
+ if r.TryDecodeAsNil() {
+ x.LastUpdateTime = pkg1_unversioned.Time{}
+ } else {
+ yyv7 := &x.LastUpdateTime
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if yym8 {
+ z.DecBinaryUnmarshal(yyv7)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = RequestConditionType(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastUpdateTime = pkg1_unversioned.Time{}
+ } else {
+ yyv13 := &x.LastUpdateTime
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv13) {
+ } else if yym14 {
+ z.DecBinaryUnmarshal(yyv13)
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv13)
+ } else {
+ z.DecFallback(yyv13, false)
+ }
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CertificateSigningRequestList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Items) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CertificateSigningRequestCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CertificateSigningRequestCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CertificateSigningRequestCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequestCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CertificateSigningRequestCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequestCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CertificateSigningRequestCondition{}) // var yyz1 CertificateSigningRequestCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequestCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CertificateSigningRequestCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CertificateSigningRequest{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 368)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CertificateSigningRequest, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CertificateSigningRequest, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequest{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CertificateSigningRequest{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequest{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CertificateSigningRequest{}) // var yyz1 CertificateSigningRequest
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequest{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CertificateSigningRequest{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
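The CodecEncodeSelf/CodecDecodeSelf methods generated above are not meant to be called directly; the ugorji codec runtime (imported in this file as codec1978) detects types that implement them and drives them through a Handle. The sketch below is an editorial illustration only, not part of the vendored patch; it assumes the codec1978 alias resolves to the vendored github.com/ugorji/go/codec package and uses only that package's public API.

// codec_roundtrip_sketch.go — illustrative only; "example-signed-cert" is a placeholder.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	"k8s.io/kubernetes/pkg/apis/certificates"
)

func main() {
	in := certificates.CertificateSigningRequestStatus{
		Certificate: []byte("example-signed-cert"),
	}

	// Encode through a JSON handle; the generated CodecEncodeSelf is picked up automatically.
	var buf []byte
	h := new(codec.JsonHandle)
	if err := codec.NewEncoderBytes(&buf, h).Encode(&in); err != nil {
		fmt.Println("encode failed:", err)
		return
	}

	// Decode back; the generated CodecDecodeSelf handles both map and array forms.
	var out certificates.CertificateSigningRequestStatus
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(string(out.Certificate))
}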
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go
new file mode 100644
index 0000000..c0eb47b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificates
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// +genclient=true
+// +nonNamespaced=true
+
+// Describes a certificate signing request
+type CertificateSigningRequest struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+	// The certificate request itself and any additional information.
+ Spec CertificateSigningRequestSpec `json:"spec,omitempty"`
+
+ // Derived information about the request.
+ Status CertificateSigningRequestStatus `json:"status,omitempty"`
+}
+
+// This information is immutable after the request is created. Only the Request
+// and ExtraInfo fields can be set on creation; other fields are derived by
+// Kubernetes and cannot be modified by users.
+type CertificateSigningRequestSpec struct {
+ // Base64-encoded PKCS#10 CSR data
+ Request []byte `json:"request"`
+
+ // Information about the requesting user (if relevant)
+ // See user.Info interface for details
+ Username string `json:"username,omitempty"`
+ UID string `json:"uid,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+}
+
+type CertificateSigningRequestStatus struct {
+ // Conditions applied to the request, such as approval or denial.
+ Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty"`
+
+	// If the request was approved, the controller will place the issued certificate here.
+ Certificate []byte `json:"certificate,omitempty"`
+}
+
+type RequestConditionType string
+
+// These are the possible conditions for a certificate request.
+const (
+ CertificateApproved RequestConditionType = "Approved"
+ CertificateDenied RequestConditionType = "Denied"
+)
+
+type CertificateSigningRequestCondition struct {
+ // request approval state, currently Approved or Denied.
+ Type RequestConditionType `json:"type"`
+ // brief reason for the request state
+ Reason string `json:"reason,omitempty"`
+	// human-readable message with details about the request state
+ Message string `json:"message,omitempty"`
+ // timestamp for the last update to this condition
+ LastUpdateTime unversioned.Time `json:"lastUpdateTime,omitempty"`
+}
+
+type CertificateSigningRequestList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []CertificateSigningRequest `json:"items,omitempty"`
+}
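The types declared above are plain Go structs, so a caller can populate them directly. The sketch below is an editorial illustration, not part of the vendored patch; the user name, group, and CSR bytes are placeholders, and the approval condition simply exercises the CertificateApproved constant defined above.

// csr_types_sketch.go — illustrative only; all literal values are hypothetical.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/certificates"
)

func main() {
	// Build a request as a client might before submitting it to the API server.
	csr := certificates.CertificateSigningRequest{
		Spec: certificates.CertificateSigningRequestSpec{
			Request:  []byte("-----BEGIN CERTIFICATE REQUEST-----..."), // placeholder PKCS#10 data
			Username: "example-user",
			UID:      "0000-example",
			Groups:   []string{"example-group"},
		},
	}

	// A controller that approves the request appends a condition; once the
	// certificate is issued it would also fill in Status.Certificate.
	csr.Status.Conditions = append(csr.Status.Conditions, certificates.CertificateSigningRequestCondition{
		Type:           certificates.CertificateApproved,
		Reason:         "ExampleApproval",
		Message:        "approved for illustration only",
		LastUpdateTime: unversioned.Now(),
	})

	fmt.Printf("CSR for %q has %d condition(s)\n", csr.Spec.Username, len(csr.Status.Conditions))
}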
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go
new file mode 100644
index 0000000..0b933b2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/kubernetes/pkg/runtime"
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions here. Currently there are none.
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion_generated.go
new file mode 100644
index 0000000..454a98b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/conversion_generated.go
@@ -0,0 +1,237 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ certificates "k8s.io/kubernetes/pkg/apis/certificates"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest,
+ Convert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest,
+ Convert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition,
+ Convert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition,
+ Convert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList,
+ Convert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList,
+ Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec,
+ Convert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec,
+ Convert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus,
+ Convert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s)
+}
+
+func autoConvert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error {
+ return autoConvert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(in, out, s)
+}
+
+func autoConvert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error {
+ out.Type = certificates.RequestConditionType(in.Type)
+ out.Reason = in.Reason
+ out.Message = in.Message
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastUpdateTime, &out.LastUpdateTime, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s)
+}
+
+func autoConvert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error {
+ out.Type = RequestConditionType(in.Type)
+ out.Reason = in.Reason
+ out.Message = in.Message
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastUpdateTime, &out.LastUpdateTime, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error {
+ return autoConvert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(in, out, s)
+}
+
+func autoConvert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]certificates.CertificateSigningRequest, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s)
+}
+
+func autoConvert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CertificateSigningRequest, len(*in))
+ for i := range *in {
+ if err := Convert_certificates_CertificateSigningRequest_To_v1alpha1_CertificateSigningRequest(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error {
+ return autoConvert_certificates_CertificateSigningRequestList_To_v1alpha1_CertificateSigningRequestList(in, out, s)
+}
+
+func autoConvert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error {
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Request, &out.Request, s); err != nil {
+ return err
+ }
+ out.Username = in.Username
+ out.UID = in.UID
+ out.Groups = in.Groups
+ return nil
+}
+
+func Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s)
+}
+
+func autoConvert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error {
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Request, &out.Request, s); err != nil {
+ return err
+ }
+ out.Username = in.Username
+ out.UID = in.UID
+ out.Groups = in.Groups
+ return nil
+}
+
+func Convert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error {
+ return autoConvert_certificates_CertificateSigningRequestSpec_To_v1alpha1_CertificateSigningRequestSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]certificates.CertificateSigningRequestCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Certificate, &out.Certificate, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s)
+}
+
+func autoConvert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]CertificateSigningRequestCondition, len(*in))
+ for i := range *in {
+ if err := Convert_certificates_CertificateSigningRequestCondition_To_v1alpha1_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Certificate, &out.Certificate, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error {
+ return autoConvert_certificates_CertificateSigningRequestStatus_To_v1alpha1_CertificateSigningRequestStatus(in, out, s)
+}
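The converters registered in init() above can also be exercised directly. The sketch below is an editorial illustration, not part of the vendored patch; passing nil for the conversion.Scope is an assumption that holds only because this particular generated path copies scalars and byte slices without touching the scope, and real callers go through api.Scheme, which supplies a proper Scope.

// conversion_sketch.go — illustrative only; field values are placeholders.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/certificates"
	"k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
)

func main() {
	in := v1alpha1.CertificateSigningRequestSpec{
		Request:  []byte("example-csr-bytes"),
		Username: "example-user",
		Groups:   []string{"example-group"},
	}

	// Convert the versioned spec into the internal certificates.CertificateSigningRequestSpec.
	var out certificates.CertificateSigningRequestSpec
	if err := v1alpha1.Convert_v1alpha1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in, &out, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(out.Username, len(out.Request), out.Groups)
}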
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/deep_copy_generated.go
new file mode 100644
index 0000000..5b4a3ad
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/deep_copy_generated.go
@@ -0,0 +1,121 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1alpha1_CertificateSigningRequest,
+ DeepCopy_v1alpha1_CertificateSigningRequestCondition,
+ DeepCopy_v1alpha1_CertificateSigningRequestList,
+ DeepCopy_v1alpha1_CertificateSigningRequestSpec,
+ DeepCopy_v1alpha1_CertificateSigningRequestStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1alpha1_CertificateSigningRequest(in CertificateSigningRequest, out *CertificateSigningRequest, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1alpha1_CertificateSigningRequestSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1alpha1_CertificateSigningRequestStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_CertificateSigningRequestCondition(in CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Reason = in.Reason
+ out.Message = in.Message
+ out.LastUpdateTime = in.LastUpdateTime.DeepCopy()
+ return nil
+}
+
+func DeepCopy_v1alpha1_CertificateSigningRequestList(in CertificateSigningRequestList, out *CertificateSigningRequestList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]CertificateSigningRequest, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_CertificateSigningRequest(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_CertificateSigningRequestSpec(in CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, c *conversion.Cloner) error {
+ if in.Request != nil {
+ in, out := in.Request, &out.Request
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Request = nil
+ }
+ out.Username = in.Username
+ out.UID = in.UID
+ if in.Groups != nil {
+ in, out := in.Groups, &out.Groups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Groups = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_CertificateSigningRequestStatus(in CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, c *conversion.Cloner) error {
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]CertificateSigningRequestCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_CertificateSigningRequestCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.Certificate != nil {
+ in, out := in.Certificate, &out.Certificate
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Certificate = nil
+ }
+ return nil
+}
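The deep-copy helpers above copy value fields and re-allocate slices, so the result is independent of the input. The sketch below is an editorial illustration, not part of the vendored patch; passing a nil *conversion.Cloner is an assumption that holds for this particular helper, whose body above never uses it.

// deepcopy_sketch.go — illustrative only.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
)

func main() {
	in := v1alpha1.CertificateSigningRequestSpec{
		Request: []byte{1, 2, 3},
		Groups:  []string{"example-group"},
	}

	var out v1alpha1.CertificateSigningRequestSpec
	if err := v1alpha1.DeepCopy_v1alpha1_CertificateSigningRequestSpec(in, &out, nil); err != nil {
		fmt.Println("deep copy failed:", err)
		return
	}

	out.Request[0] = 9                         // mutate the copy...
	fmt.Println(in.Request[0], out.Request[0]) // ...the original byte slice is untouched
}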
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go
new file mode 100644
index 0000000..3528c16
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/certificates
+
+package v1alpha1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go
new file mode 100644
index 0000000..174d7ef
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.pb.go
@@ -0,0 +1,1192 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1alpha1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto
+
+ It has these top-level messages:
+ CertificateSigningRequest
+ CertificateSigningRequestCondition
+ CertificateSigningRequestList
+ CertificateSigningRequestSpec
+ CertificateSigningRequestStatus
+*/
+package v1alpha1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} }
+func (m *CertificateSigningRequest) String() string { return proto.CompactTextString(m) }
+func (*CertificateSigningRequest) ProtoMessage() {}
+
+func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} }
+func (m *CertificateSigningRequestCondition) String() string { return proto.CompactTextString(m) }
+func (*CertificateSigningRequestCondition) ProtoMessage() {}
+
+func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} }
+func (m *CertificateSigningRequestList) String() string { return proto.CompactTextString(m) }
+func (*CertificateSigningRequestList) ProtoMessage() {}
+
+func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} }
+func (m *CertificateSigningRequestSpec) String() string { return proto.CompactTextString(m) }
+func (*CertificateSigningRequestSpec) ProtoMessage() {}
+
+func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} }
+func (m *CertificateSigningRequestStatus) String() string { return proto.CompactTextString(m) }
+func (*CertificateSigningRequestStatus) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequest")
+ proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestCondition")
+ proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestList")
+ proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestSpec")
+ proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.kubernetes.pkg.apis.certificates.v1alpha1.CertificateSigningRequestStatus")
+}
+func (m *CertificateSigningRequest) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *CertificateSigningRequestCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size()))
+ n4, err := m.LastUpdateTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ return i, nil
+}
+
+func (m *CertificateSigningRequestList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n5, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CertificateSigningRequestSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Request != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Request)))
+ i += copy(data[i:], m.Request)
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Username)))
+ i += copy(data[i:], m.Username)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.UID)))
+ i += copy(data[i:], m.UID)
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ data[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *CertificateSigningRequestStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Certificate != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Certificate)))
+ i += copy(data[i:], m.Certificate)
+ }
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *CertificateSigningRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CertificateSigningRequestCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CertificateSigningRequestList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CertificateSigningRequestSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Request != nil {
+ l = len(m.Request)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Username)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CertificateSigningRequestStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Certificate != nil {
+ l = len(m.Certificate)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *CertificateSigningRequest) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = RequestConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CertificateSigningRequest{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Request = append(m.Request[:0], data[iNdEx:postIndex]...)
+ if m.Request == nil {
+ m.Request = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Username = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...)
+ if m.Certificate == nil {
+ m.Certificate = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
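
The encodeVarintGenerated and sovGenerated helpers above are standard protocol-buffer base-128 varint routines: seven payload bits per byte, the high bit set on every byte except the last, with sovGenerated returning the encoded size. A minimal standalone round-trip sketch of the same idea, using only the standard library and not part of the vendored file (function names here are illustrative only):

package main

import "fmt"

// putUvarint appends v to buf in base-128 varint form, mirroring the
// loop in encodeVarintGenerated above.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint decodes a varint from data, returning the value and the number
// of bytes consumed, mirroring the shift loops in the Unmarshal methods.
func uvarint(data []byte) (uint64, int) {
	var v uint64
	for i, b := range data {
		v |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	buf := putUvarint(nil, 300) // 300 encodes as 0xac 0x02
	v, n := uvarint(buf)
	fmt.Printf("% x -> %d (%d bytes)\n", buf, v, n)
}
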
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto
new file mode 100644
index 0000000..450b374
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto
@@ -0,0 +1,86 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.certificates.v1alpha1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// Describes a certificate signing request
+message CertificateSigningRequest {
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // The certificate request itself and any additional information.
+ optional CertificateSigningRequestSpec spec = 2;
+
+ // Derived information about the request.
+ optional CertificateSigningRequestStatus status = 3;
+}
+
+message CertificateSigningRequestCondition {
+ // request approval state, currently Approved or Denied.
+ optional string type = 1;
+
+ // brief reason for the request state
+ optional string reason = 2;
+
+ // human readable message with details about the request state
+ optional string message = 3;
+
+ // timestamp for the last update to this condition
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastUpdateTime = 4;
+}
+
+message CertificateSigningRequestList {
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ repeated CertificateSigningRequest items = 2;
+}
+
+// This information is immutable after the request is created. Only the Request
+// and ExtraInfo fields can be set on creation; other fields are derived by
+// Kubernetes and cannot be modified by users.
+message CertificateSigningRequestSpec {
+ // Base64-encoded PKCS#10 CSR data
+ optional bytes request = 1;
+
+ // Information about the requesting user (if relevant)
+ // See user.Info interface for details
+ optional string username = 2;
+
+ optional string uid = 3;
+
+ repeated string groups = 4;
+}
+
+message CertificateSigningRequestStatus {
+ // Conditions applied to the request, such as approval or denial.
+ repeated CertificateSigningRequestCondition conditions = 1;
+
+ // If request was approved, the controller will place the issued certificate here.
+ optional bytes certificate = 2;
+}
+
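
The wire/JSON field names declared in generated.proto above (request, username, uid, groups; conditions, certificate) are the same keys the codec paths later in this change decode. A hypothetical, self-contained sketch of a spec in that shape, using local stand-in types rather than the vendored package:

package main

import (
	"encoding/json"
	"fmt"
)

// csrSpec is a local stand-in mirroring CertificateSigningRequestSpec from
// generated.proto above; only the JSON tags follow the real field names.
type csrSpec struct {
	Request  []byte   `json:"request,omitempty"` // CSR bytes; encoding/json base64-encodes []byte
	Username string   `json:"username,omitempty"`
	UID      string   `json:"uid,omitempty"`
	Groups   []string `json:"groups,omitempty"`
}

func main() {
	spec := csrSpec{
		Request:  []byte("-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----\n"),
		Username: "system:node:worker-0", // hypothetical requester
		Groups:   []string{"system:nodes"},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
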
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go
new file mode 100644
index 0000000..7b841c0
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/register.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "certificates"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ // addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CertificateSigningRequest{},
+ &CertificateSigningRequestList{},
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ )
+
+ // Add the watch version that applies
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
+
+func (obj *CertificateSigningRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *CertificateSigningRequestList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
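
register.go above exposes AddToScheme so callers can wire the certificates/v1alpha1 kinds into a runtime.Scheme. A hedged usage sketch, assuming the vendored k8s.io/kubernetes packages from this tree are on the import path (real callers normally rely on the shared api.Scheme and the generated clientset instead):

package main

import (
	"fmt"

	certv1alpha1 "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Register CertificateSigningRequest and its list/options kinds on a
	// fresh scheme via the AddToScheme defined above.
	scheme := runtime.NewScheme()
	certv1alpha1.AddToScheme(scheme)

	// SchemeGroupVersion stringifies as "certificates/v1alpha1".
	fmt.Println(certv1alpha1.SchemeGroupVersion.String())
}
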
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go
new file mode 100644
index 0000000..74d6765
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.generated.go
@@ -0,0 +1,1963 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1alpha1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.TypeMeta
+ var v1 pkg2_v1.ObjectMeta
+ var v2 pkg3_types.UID
+ var v3 time.Time
+ _, _, _, _ = v0, v1, v2, v3
+ }
+}
+
+func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = CertificateSigningRequestSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = CertificateSigningRequestStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = CertificateSigningRequestSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = CertificateSigningRequestStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Username != ""
+ yyq2[2] = x.UID != ""
+ yyq2[3] = len(x.Groups) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Request == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("request"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Request == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("username"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Username))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("uid"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.UID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("groups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Groups == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Groups, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "request":
+ if r.TryDecodeAsNil() {
+ x.Request = nil
+ } else {
+ yyv4 := &x.Request
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false)
+ }
+ }
+ case "username":
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ case "uid":
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ case "groups":
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv8 := &x.Groups
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Request = nil
+ } else {
+ yyv11 := &x.Request
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Username = ""
+ } else {
+ x.Username = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UID = ""
+ } else {
+ x.UID = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Groups = nil
+ } else {
+ yyv15 := &x.Groups
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv15, false, d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Conditions) != 0
+ yyq2[1] = len(x.Certificate) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Certificate == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("certificate"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Certificate == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv4 := &x.Conditions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv4), d)
+ }
+ }
+ case "certificate":
+ if r.TryDecodeAsNil() {
+ x.Certificate = nil
+ } else {
+ yyv6 := &x.Certificate
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv9 := &x.Conditions
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Certificate = nil
+ } else {
+ yyv11 := &x.Certificate
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Reason != ""
+ yyq2[2] = x.Message != ""
+ yyq2[3] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy13 := &x.LastUpdateTime
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy13) {
+ } else if yym14 {
+ z.EncBinaryMarshal(yy13)
+ } else if !yym14 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy13)
+ } else {
+ z.EncFallback(yy13)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy15 := &x.LastUpdateTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = RequestConditionType(r.DecodeString())
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ case "lastUpdateTime":
+ if r.TryDecodeAsNil() {
+ x.LastUpdateTime = pkg1_unversioned.Time{}
+ } else {
+ yyv7 := &x.LastUpdateTime
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if yym8 {
+ z.DecBinaryUnmarshal(yyv7)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = RequestConditionType(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastUpdateTime = pkg1_unversioned.Time{}
+ } else {
+ yyv13 := &x.LastUpdateTime
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv13) {
+ } else if yym14 {
+ z.DecBinaryUnmarshal(yyv13)
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv13)
+ } else {
+ z.DecFallback(yyv13, false)
+ }
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CertificateSigningRequestList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Items) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CertificateSigningRequestCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CertificateSigningRequestCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CertificateSigningRequestCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequestCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CertificateSigningRequestCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequestCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CertificateSigningRequestCondition{}) // var yyz1 CertificateSigningRequestCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequestCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CertificateSigningRequestCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CertificateSigningRequest{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 368)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CertificateSigningRequest, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CertificateSigningRequest, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequest{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CertificateSigningRequest{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequest{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CertificateSigningRequest{}) // var yyz1 CertificateSigningRequest
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CertificateSigningRequest{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CertificateSigningRequest{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
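
The generated CodecEncodeSelf and CodecDecodeSelf methods above are not called directly; the ugorji codec Encoder and Decoder dispatch to them whenever a type implements codec.Selfer, and fall back to reflection otherwise. A minimal sketch of driving that machinery through the public codec API, using a local illustrative struct (not the vendored types) and a JSON handle:

package main

import (
	"fmt"

	codec "github.com/ugorji/go/codec"
)

// condition mirrors the shape of CertificateSigningRequestCondition; it is a
// local illustrative type, so the Encoder handles it via reflection. Types
// that implement codec.Selfer (like the generated ones above) are dispatched
// to their CodecEncodeSelf/CodecDecodeSelf methods instead.
type condition struct {
	Type    string `codec:"type"`
	Reason  string `codec:"reason"`
	Message string `codec:"message"`
}

func main() {
	var jh codec.JsonHandle // any codec.Handle works; JSON keeps the output readable

	in := condition{Type: "Approved", Reason: "AutoApproved"}

	// Encode into a byte slice.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &jh).Encode(in); err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %s\n", buf)

	// Decode back into a zero value, the same round trip the generated
	// codecDecodeSelfFromMap path serves for the vendored types.
	var out condition
	if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v\n", out)
}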
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go
new file mode 100644
index 0000000..5f046ba
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+)
+
+// +genclient=true
+// +nonNamespaced=true
+
+// Describes a certificate signing request
+type CertificateSigningRequest struct {
+ unversioned.TypeMeta `json:",inline"`
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The certificate request itself and any additional information.
+ Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Derived information about the request.
+ Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// This information is immutable after the request is created. Only the Request
+// and ExtraInfo fields can be set on creation; other fields are derived by
+// Kubernetes and cannot be modified by users.
+type CertificateSigningRequestSpec struct {
+ // Base64-encoded PKCS#10 CSR data
+ Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"`
+
+ // Information about the requesting user (if relevant)
+ // See user.Info interface for details
+ Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
+ UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
+ Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"`
+}
+
+type CertificateSigningRequestStatus struct {
+ // Conditions applied to the request, such as approval or denial.
+ Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+
+	// If the request was approved, the controller will place the issued certificate here.
+ Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"`
+}
+
+type RequestConditionType string
+
+// These are the possible conditions for a certificate request.
+const (
+ CertificateApproved RequestConditionType = "Approved"
+ CertificateDenied RequestConditionType = "Denied"
+)
+
+type CertificateSigningRequestCondition struct {
+ // request approval state, currently Approved or Denied.
+ Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"`
+ // brief reason for the request state
+ Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+ // human readable message with details about the request state
+ Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+ // timestamp for the last update to this condition
+ LastUpdateTime unversioned.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"`
+}
+
+type CertificateSigningRequestList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ Items []CertificateSigningRequest `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+}
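
A minimal sketch of how the types above might be populated by a caller, assuming the vendored import paths shown in this patch; the object name, CSR bytes, and user details are placeholders:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
)

func main() {
	csr := certificates.CertificateSigningRequest{
		ObjectMeta: v1.ObjectMeta{Name: "example-csr"}, // placeholder name
		Spec: certificates.CertificateSigningRequestSpec{
			// PKCS#10 CSR bytes, elided here; []byte is base64-encoded in JSON.
			Request:  []byte("-----BEGIN CERTIFICATE REQUEST-----\n..."),
			Username: "example-user", // requesting user, placeholder
			Groups:   []string{"example-group"},
		},
	}

	// An approver appends a condition; the controller later fills
	// Status.Certificate once the certificate is issued.
	csr.Status.Conditions = append(csr.Status.Conditions,
		certificates.CertificateSigningRequestCondition{
			Type:           certificates.CertificateApproved,
			Reason:         "AutoApproved",
			Message:        "approved for illustration only",
			LastUpdateTime: unversioned.Now(),
		})

	fmt.Println(csr.Name, csr.Status.Conditions[0].Type)
}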
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..47b3b28
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_CertificateSigningRequest = map[string]string{
+ "": "Describes a certificate signing request",
+	"spec":   "The certificate request itself and any additional information.",
+ "status": "Derived information about the request.",
+}
+
+func (CertificateSigningRequest) SwaggerDoc() map[string]string {
+ return map_CertificateSigningRequest
+}
+
+var map_CertificateSigningRequestCondition = map[string]string{
+ "type": "request approval state, currently Approved or Denied.",
+ "reason": "brief reason for the request state",
+ "message": "human readable message with details about the request state",
+ "lastUpdateTime": "timestamp for the last update to this condition",
+}
+
+func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string {
+ return map_CertificateSigningRequestCondition
+}
+
+var map_CertificateSigningRequestSpec = map[string]string{
+	"":         "This information is immutable after the request is created. Only the Request and ExtraInfo fields can be set on creation; other fields are derived by Kubernetes and cannot be modified by users.",
+ "request": "Base64-encoded PKCS#10 CSR data",
+	"username": "Information about the requesting user (if relevant); see the user.Info interface for details",
+}
+
+func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
+ return map_CertificateSigningRequestSpec
+}
+
+var map_CertificateSigningRequestStatus = map[string]string{
+ "conditions": "Conditions applied to the request, such as approval or denial.",
+	"certificate": "If the request was approved, the controller will place the issued certificate here.",
+}
+
+func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
+ return map_CertificateSigningRequestStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go
new file mode 100644
index 0000000..37615c4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go
@@ -0,0 +1,299 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package componentconfig
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_componentconfig_IPVar,
+ DeepCopy_componentconfig_KubeControllerManagerConfiguration,
+ DeepCopy_componentconfig_KubeProxyConfiguration,
+ DeepCopy_componentconfig_KubeSchedulerConfiguration,
+ DeepCopy_componentconfig_KubeletConfiguration,
+ DeepCopy_componentconfig_LeaderElectionConfiguration,
+ DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration,
+ DeepCopy_componentconfig_PortRangeVar,
+ DeepCopy_componentconfig_VolumeConfiguration,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_componentconfig_IPVar(in IPVar, out *IPVar, c *conversion.Cloner) error {
+ if in.Val != nil {
+ in, out := in.Val, &out.Val
+ *out = new(string)
+ **out = *in
+ } else {
+ out.Val = nil
+ }
+ return nil
+}
+
+func DeepCopy_componentconfig_KubeControllerManagerConfiguration(in KubeControllerManagerConfiguration, out *KubeControllerManagerConfiguration, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Port = in.Port
+ out.Address = in.Address
+ out.CloudProvider = in.CloudProvider
+ out.CloudConfigFile = in.CloudConfigFile
+ out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs
+ out.ConcurrentRSSyncs = in.ConcurrentRSSyncs
+ out.ConcurrentRCSyncs = in.ConcurrentRCSyncs
+ out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs
+ out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs
+ out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs
+ out.ConcurrentJobSyncs = in.ConcurrentJobSyncs
+ out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs
+ out.ConcurrentSATokenSyncs = in.ConcurrentSATokenSyncs
+ out.LookupCacheSizeForRC = in.LookupCacheSizeForRC
+ out.LookupCacheSizeForRS = in.LookupCacheSizeForRS
+ out.LookupCacheSizeForDaemonSet = in.LookupCacheSizeForDaemonSet
+ out.ServiceSyncPeriod = in.ServiceSyncPeriod
+ out.NodeSyncPeriod = in.NodeSyncPeriod
+ out.ResourceQuotaSyncPeriod = in.ResourceQuotaSyncPeriod
+ out.NamespaceSyncPeriod = in.NamespaceSyncPeriod
+ out.PVClaimBinderSyncPeriod = in.PVClaimBinderSyncPeriod
+ out.MinResyncPeriod = in.MinResyncPeriod
+ out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
+ out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
+ out.DeploymentControllerSyncPeriod = in.DeploymentControllerSyncPeriod
+ out.PodEvictionTimeout = in.PodEvictionTimeout
+ out.DeletingPodsQps = in.DeletingPodsQps
+ out.DeletingPodsBurst = in.DeletingPodsBurst
+ out.NodeMonitorGracePeriod = in.NodeMonitorGracePeriod
+ out.RegisterRetryCount = in.RegisterRetryCount
+ out.NodeStartupGracePeriod = in.NodeStartupGracePeriod
+ out.NodeMonitorPeriod = in.NodeMonitorPeriod
+ out.ServiceAccountKeyFile = in.ServiceAccountKeyFile
+ out.EnableProfiling = in.EnableProfiling
+ out.ClusterName = in.ClusterName
+ out.ClusterCIDR = in.ClusterCIDR
+ out.ServiceCIDR = in.ServiceCIDR
+ out.NodeCIDRMaskSize = in.NodeCIDRMaskSize
+ out.AllocateNodeCIDRs = in.AllocateNodeCIDRs
+ out.ConfigureCloudRoutes = in.ConfigureCloudRoutes
+ out.RootCAFile = in.RootCAFile
+ out.ContentType = in.ContentType
+ out.KubeAPIQPS = in.KubeAPIQPS
+ out.KubeAPIBurst = in.KubeAPIBurst
+ out.LeaderElection = in.LeaderElection
+ out.VolumeConfiguration = in.VolumeConfiguration
+ out.ControllerStartInterval = in.ControllerStartInterval
+ out.EnableGarbageCollector = in.EnableGarbageCollector
+ return nil
+}
+
+func DeepCopy_componentconfig_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.BindAddress = in.BindAddress
+ out.ClusterCIDR = in.ClusterCIDR
+ out.HealthzBindAddress = in.HealthzBindAddress
+ out.HealthzPort = in.HealthzPort
+ out.HostnameOverride = in.HostnameOverride
+ if in.IPTablesMasqueradeBit != nil {
+ in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.IPTablesMasqueradeBit = nil
+ }
+ out.IPTablesSyncPeriod = in.IPTablesSyncPeriod
+ out.KubeconfigPath = in.KubeconfigPath
+ out.MasqueradeAll = in.MasqueradeAll
+ out.Master = in.Master
+ if in.OOMScoreAdj != nil {
+ in, out := in.OOMScoreAdj, &out.OOMScoreAdj
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.OOMScoreAdj = nil
+ }
+ out.Mode = in.Mode
+ out.PortRange = in.PortRange
+ out.ResourceContainer = in.ResourceContainer
+ out.UDPIdleTimeout = in.UDPIdleTimeout
+ out.ConntrackMax = in.ConntrackMax
+ out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
+ return nil
+}
+
+func DeepCopy_componentconfig_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Port = in.Port
+ out.Address = in.Address
+ out.AlgorithmProvider = in.AlgorithmProvider
+ out.PolicyConfigFile = in.PolicyConfigFile
+ out.EnableProfiling = in.EnableProfiling
+ out.ContentType = in.ContentType
+ out.KubeAPIQPS = in.KubeAPIQPS
+ out.KubeAPIBurst = in.KubeAPIBurst
+ out.SchedulerName = in.SchedulerName
+ out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
+ out.FailureDomains = in.FailureDomains
+ out.LeaderElection = in.LeaderElection
+ return nil
+}
+
+func DeepCopy_componentconfig_KubeletConfiguration(in KubeletConfiguration, out *KubeletConfiguration, c *conversion.Cloner) error {
+ out.Config = in.Config
+ out.SyncFrequency = in.SyncFrequency
+ out.FileCheckFrequency = in.FileCheckFrequency
+ out.HTTPCheckFrequency = in.HTTPCheckFrequency
+ out.ManifestURL = in.ManifestURL
+ out.ManifestURLHeader = in.ManifestURLHeader
+ out.EnableServer = in.EnableServer
+ out.Address = in.Address
+ out.Port = in.Port
+ out.ReadOnlyPort = in.ReadOnlyPort
+ out.TLSCertFile = in.TLSCertFile
+ out.TLSPrivateKeyFile = in.TLSPrivateKeyFile
+ out.CertDirectory = in.CertDirectory
+ out.HostnameOverride = in.HostnameOverride
+ out.PodInfraContainerImage = in.PodInfraContainerImage
+ out.DockerEndpoint = in.DockerEndpoint
+ out.RootDirectory = in.RootDirectory
+ out.SeccompProfileRoot = in.SeccompProfileRoot
+ out.AllowPrivileged = in.AllowPrivileged
+ out.HostNetworkSources = in.HostNetworkSources
+ out.HostPIDSources = in.HostPIDSources
+ out.HostIPCSources = in.HostIPCSources
+ out.RegistryPullQPS = in.RegistryPullQPS
+ out.RegistryBurst = in.RegistryBurst
+ out.EventRecordQPS = in.EventRecordQPS
+ out.EventBurst = in.EventBurst
+ out.EnableDebuggingHandlers = in.EnableDebuggingHandlers
+ out.MinimumGCAge = in.MinimumGCAge
+ out.MaxPerPodContainerCount = in.MaxPerPodContainerCount
+ out.MaxContainerCount = in.MaxContainerCount
+ out.CAdvisorPort = in.CAdvisorPort
+ out.HealthzPort = in.HealthzPort
+ out.HealthzBindAddress = in.HealthzBindAddress
+ out.OOMScoreAdj = in.OOMScoreAdj
+ out.RegisterNode = in.RegisterNode
+ out.ClusterDomain = in.ClusterDomain
+ out.MasterServiceNamespace = in.MasterServiceNamespace
+ out.ClusterDNS = in.ClusterDNS
+ out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout
+ out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
+ out.ImageMinimumGCAge = in.ImageMinimumGCAge
+ out.ImageGCHighThresholdPercent = in.ImageGCHighThresholdPercent
+ out.ImageGCLowThresholdPercent = in.ImageGCLowThresholdPercent
+ out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB
+ out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
+ out.NetworkPluginName = in.NetworkPluginName
+ out.NetworkPluginDir = in.NetworkPluginDir
+ out.VolumePluginDir = in.VolumePluginDir
+ out.CloudProvider = in.CloudProvider
+ out.CloudConfigFile = in.CloudConfigFile
+ out.KubeletCgroups = in.KubeletCgroups
+ out.RuntimeCgroups = in.RuntimeCgroups
+ out.SystemCgroups = in.SystemCgroups
+ out.CgroupRoot = in.CgroupRoot
+ out.ContainerRuntime = in.ContainerRuntime
+ out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
+ out.RktPath = in.RktPath
+ out.RktAPIEndpoint = in.RktAPIEndpoint
+ out.RktStage1Image = in.RktStage1Image
+ out.LockFilePath = in.LockFilePath
+ out.ExitOnLockContention = in.ExitOnLockContention
+ out.ConfigureCBR0 = in.ConfigureCBR0
+ out.HairpinMode = in.HairpinMode
+ out.BabysitDaemons = in.BabysitDaemons
+ out.MaxPods = in.MaxPods
+ out.NvidiaGPUs = in.NvidiaGPUs
+ out.DockerExecHandlerName = in.DockerExecHandlerName
+ out.PodCIDR = in.PodCIDR
+ out.ResolverConfig = in.ResolverConfig
+ out.CPUCFSQuota = in.CPUCFSQuota
+ out.Containerized = in.Containerized
+ out.MaxOpenFiles = in.MaxOpenFiles
+ out.ReconcileCIDR = in.ReconcileCIDR
+ out.RegisterSchedulable = in.RegisterSchedulable
+ out.ContentType = in.ContentType
+ out.KubeAPIQPS = in.KubeAPIQPS
+ out.KubeAPIBurst = in.KubeAPIBurst
+ out.SerializeImagePulls = in.SerializeImagePulls
+ out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay
+ out.OutOfDiskTransitionFrequency = in.OutOfDiskTransitionFrequency
+ out.NodeIP = in.NodeIP
+ if in.NodeLabels != nil {
+ in, out := in.NodeLabels, &out.NodeLabels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.NodeLabels = nil
+ }
+ out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
+ out.EnableCustomMetrics = in.EnableCustomMetrics
+ out.EvictionHard = in.EvictionHard
+ out.EvictionSoft = in.EvictionSoft
+ out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod
+ out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
+ out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
+ out.PodsPerCore = in.PodsPerCore
+ out.EnableControllerAttachDetach = in.EnableControllerAttachDetach
+ return nil
+}
+
+func DeepCopy_componentconfig_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error {
+ out.LeaderElect = in.LeaderElect
+ out.LeaseDuration = in.LeaseDuration
+ out.RenewDeadline = in.RenewDeadline
+ out.RetryPeriod = in.RetryPeriod
+ return nil
+}
+
+func DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in PersistentVolumeRecyclerConfiguration, out *PersistentVolumeRecyclerConfiguration, c *conversion.Cloner) error {
+ out.MaximumRetry = in.MaximumRetry
+ out.MinimumTimeoutNFS = in.MinimumTimeoutNFS
+ out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS
+ out.IncrementTimeoutNFS = in.IncrementTimeoutNFS
+ out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath
+ out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath
+ out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath
+ return nil
+}
+
+func DeepCopy_componentconfig_PortRangeVar(in PortRangeVar, out *PortRangeVar, c *conversion.Cloner) error {
+ if in.Val != nil {
+ in, out := in.Val, &out.Val
+ *out = new(string)
+ **out = *in
+ } else {
+ out.Val = nil
+ }
+ return nil
+}
+
+func DeepCopy_componentconfig_VolumeConfiguration(in VolumeConfiguration, out *VolumeConfiguration, c *conversion.Cloner) error {
+ out.EnableHostPathProvisioning = in.EnableHostPathProvisioning
+ out.EnableDynamicProvisioning = in.EnableDynamicProvisioning
+ out.PersistentVolumeRecyclerConfiguration = in.PersistentVolumeRecyclerConfiguration
+ out.FlexVolumePluginDir = in.FlexVolumePluginDir
+ return nil
+}
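
Each generated deep-copy function above follows the same mechanical pattern: plain value fields are assigned directly, while pointer and map fields are re-allocated so the copy shares no memory with the source. A self-contained sketch of that pattern using only local types and the standard library, not part of the vendored file:

package main

import "fmt"

type config struct {
	Port   int32
	OOMAdj *int32            // pointer field: must be re-allocated
	Labels map[string]string // map field: must be re-allocated
}

// deepCopyConfig mirrors the structure of the DeepCopy_componentconfig_* functions.
func deepCopyConfig(in config, out *config) {
	out.Port = in.Port // plain value: direct assignment is a full copy

	if in.OOMAdj != nil {
		out.OOMAdj = new(int32)
		*out.OOMAdj = *in.OOMAdj // copy the pointee, not the pointer
	} else {
		out.OOMAdj = nil
	}

	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	} else {
		out.Labels = nil
	}
}

func main() {
	adj := int32(-999)
	src := config{Port: 10249, OOMAdj: &adj, Labels: map[string]string{"role": "proxy"}}

	var dst config
	deepCopyConfig(src, &dst)

	// Mutating the source no longer affects the copy.
	*src.OOMAdj = 0
	src.Labels["role"] = "changed"
	fmt.Println(*dst.OOMAdj, dst.Labels["role"]) // -999 proxy
}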
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go
new file mode 100644
index 0000000..d044b16
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package componentconfig
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go
new file mode 100644
index 0000000..43b625b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package componentconfig
+
+import (
+ "fmt"
+ "net"
+
+ utilnet "k8s.io/kubernetes/pkg/util/net"
+)
+
+// used for validating command line opts
+// TODO(mikedanese): remove these when we remove command line flags
+
+type IPVar struct {
+ Val *string
+}
+
+func (v IPVar) Set(s string) error {
+ if net.ParseIP(s) == nil {
+ return fmt.Errorf("%q is not a valid IP address", s)
+ }
+ if v.Val == nil {
+		// it's okay to panic here since this is a programmer error
+ panic("the string pointer passed into IPVar should not be nil")
+ }
+ *v.Val = s
+ return nil
+}
+
+func (v IPVar) String() string {
+ if v.Val == nil {
+ return ""
+ }
+ return *v.Val
+}
+
+func (v IPVar) Type() string {
+ return "ip"
+}
+
+func (m *ProxyMode) Set(s string) error {
+ *m = ProxyMode(s)
+ return nil
+}
+
+func (m *ProxyMode) String() string {
+ if m != nil {
+ return string(*m)
+ }
+ return ""
+}
+
+func (m *ProxyMode) Type() string {
+ return "ProxyMode"
+}
+
+type PortRangeVar struct {
+ Val *string
+}
+
+func (v PortRangeVar) Set(s string) error {
+ if _, err := utilnet.ParsePortRange(s); err != nil {
+ return fmt.Errorf("%q is not a valid port range: %v", s, err)
+ }
+ if v.Val == nil {
+		// it's okay to panic here since this is a programmer error
+ panic("the string pointer passed into PortRangeVar should not be nil")
+ }
+ *v.Val = s
+ return nil
+}
+
+func (v PortRangeVar) String() string {
+ if v.Val == nil {
+ return ""
+ }
+ return *v.Val
+}
+
+func (v PortRangeVar) Type() string {
+ return "port-range"
+}
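
Because IPVar and PortRangeVar implement Set and String, they satisfy the standard flag.Value interface (the extra Type method is for pflag-style flag sets). A self-contained sketch of wiring such a validating flag type into a flag set; the type below is a local stand-in for IPVar so the snippet compiles on its own:

package main

import (
	"flag"
	"fmt"
	"net"
)

// ipVar is a local stand-in for the IPVar type above.
type ipVar struct{ Val *string }

func (v ipVar) Set(s string) error {
	if net.ParseIP(s) == nil {
		return fmt.Errorf("%q is not a valid IP address", s)
	}
	*v.Val = s
	return nil
}

func (v ipVar) String() string {
	if v.Val == nil {
		return ""
	}
	return *v.Val
}

func main() {
	bindAddress := "0.0.0.0" // default, overwritten in place by Set
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(ipVar{Val: &bindAddress}, "bind-address", "IP address to serve on")

	fs.Parse([]string{"--bind-address=127.0.0.1"})
	fmt.Println("bind address:", bindAddress) // 127.0.0.1
}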
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go
new file mode 100644
index 0000000..1a8b0cd
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the experimental API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/componentconfig"
+ "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/componentconfig"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", componentconfig.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString()
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version
+// string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1alpha1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(componentconfig.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ componentconfig.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1alpha1.SchemeGroupVersion:
+ v1alpha1.AddToScheme(api.Scheme)
+ }
+ }
+}
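
The init function above registers the group purely as a side effect of importing the package, so callers typically pull it in with a blank import. A minimal sketch, assuming the vendored import paths shown in this patch:

package main

import (
	"k8s.io/kubernetes/pkg/api"

	// Imported for its side effects only: the init function above registers
	// the componentconfig group versions and adds the types to api.Scheme.
	_ "k8s.io/kubernetes/pkg/apis/componentconfig/install"
)

func main() {
	// After the blank import, encoders and decoders obtained from api.Scheme
	// recognize KubeProxyConfiguration and KubeSchedulerConfiguration.
	_ = api.Scheme
}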
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go
new file mode 100644
index 0000000..599a44e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package componentconfig
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+}
+
+// GroupName is the group name used in this package
+const GroupName = "componentconfig"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group-qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group-qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func addKnownTypes(scheme *runtime.Scheme) {
+	// TODO this will get cleaned up when the scheme types are fixed
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &KubeProxyConfiguration{},
+ &KubeSchedulerConfiguration{},
+ )
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go
new file mode 100644
index 0000000..c9fd484
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go
@@ -0,0 +1,9703 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package componentconfig
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_unversioned.TypeMeta
+ var v1 time.Duration
+ _, _ = v0, v1
+ }
+}
+
+func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [19]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[17] = x.Kind != ""
+ yyq2[18] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(19)
+ } else {
+ yynn2 = 17
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.BindAddress))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("bindAddress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.BindAddress))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HealthzPort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("healthzPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HealthzPort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.IPTablesMasqueradeBit == nil {
+ r.EncodeNil()
+ } else {
+ yy19 := *x.IPTablesMasqueradeBit
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(yy19))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iptablesMasqueradeBit"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.IPTablesMasqueradeBit == nil {
+ r.EncodeNil()
+ } else {
+ yy21 := *x.IPTablesMasqueradeBit
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeInt(int64(yy21))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy24 := &x.IPTablesSyncPeriod
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy24) {
+ } else if !yym25 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy24)
+ } else {
+ z.EncFallback(yy24)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("iptablesSyncPeriodSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy26 := &x.IPTablesSyncPeriod
+ yym27 := z.EncBinary()
+ _ = yym27
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy26) {
+ } else if !yym27 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy26)
+ } else {
+ z.EncFallback(yy26)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeconfigPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeBool(bool(x.MasqueradeAll))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("masqueradeAll"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ r.EncodeBool(bool(x.MasqueradeAll))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Master))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("master"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Master))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.OOMScoreAdj == nil {
+ r.EncodeNil()
+ } else {
+ yy38 := *x.OOMScoreAdj
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ r.EncodeInt(int64(yy38))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.OOMScoreAdj == nil {
+ r.EncodeNil()
+ } else {
+ yy40 := *x.OOMScoreAdj
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeInt(int64(yy40))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Mode.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("mode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Mode.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym46 := z.EncBinary()
+ _ = yym46
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PortRange))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("portRange"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PortRange))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym49 := z.EncBinary()
+ _ = yym49
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym50 := z.EncBinary()
+ _ = yym50
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy52 := &x.UDPIdleTimeout
+ yym53 := z.EncBinary()
+ _ = yym53
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy52) {
+ } else if !yym53 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy52)
+ } else {
+ z.EncFallback(yy52)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("udpTimeoutMilliseconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy54 := &x.UDPIdleTimeout
+ yym55 := z.EncBinary()
+ _ = yym55
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy54) {
+ } else if !yym55 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy54)
+ } else {
+ z.EncFallback(yy54)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym57 := z.EncBinary()
+ _ = yym57
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConntrackMax))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conntrackMax"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym58 := z.EncBinary()
+ _ = yym58
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConntrackMax))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy60 := &x.ConntrackTCPEstablishedTimeout
+ yym61 := z.EncBinary()
+ _ = yym61
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy60) {
+ } else if !yym61 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy60)
+ } else {
+ z.EncFallback(yy60)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conntrackTCPEstablishedTimeout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy62 := &x.ConntrackTCPEstablishedTimeout
+ yym63 := z.EncBinary()
+ _ = yym63
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy62) {
+ } else if !yym63 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy62)
+ } else {
+ z.EncFallback(yy62)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[17] {
+ yym65 := z.EncBinary()
+ _ = yym65
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[17] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym66 := z.EncBinary()
+ _ = yym66
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[18] {
+ yym68 := z.EncBinary()
+ _ = yym68
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[18] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym69 := z.EncBinary()
+ _ = yym69
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *KubeProxyConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
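+ // codecDecodeSelfFromMap decodes KubeProxyConfiguration fields keyed by their
+ // JSON names; unrecognized keys are reported through DecStructFieldNotFound.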
+func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "bindAddress":
+ if r.TryDecodeAsNil() {
+ x.BindAddress = ""
+ } else {
+ x.BindAddress = string(r.DecodeString())
+ }
+ case "clusterCIDR":
+ if r.TryDecodeAsNil() {
+ x.ClusterCIDR = ""
+ } else {
+ x.ClusterCIDR = string(r.DecodeString())
+ }
+ case "healthzBindAddress":
+ if r.TryDecodeAsNil() {
+ x.HealthzBindAddress = ""
+ } else {
+ x.HealthzBindAddress = string(r.DecodeString())
+ }
+ case "healthzPort":
+ if r.TryDecodeAsNil() {
+ x.HealthzPort = 0
+ } else {
+ x.HealthzPort = int32(r.DecodeInt(32))
+ }
+ case "hostnameOverride":
+ if r.TryDecodeAsNil() {
+ x.HostnameOverride = ""
+ } else {
+ x.HostnameOverride = string(r.DecodeString())
+ }
+ case "iptablesMasqueradeBit":
+ if r.TryDecodeAsNil() {
+ if x.IPTablesMasqueradeBit != nil {
+ x.IPTablesMasqueradeBit = nil
+ }
+ } else {
+ if x.IPTablesMasqueradeBit == nil {
+ x.IPTablesMasqueradeBit = new(int32)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "iptablesSyncPeriodSeconds":
+ if r.TryDecodeAsNil() {
+ x.IPTablesSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv11 := &x.IPTablesSyncPeriod
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ case "kubeconfigPath":
+ if r.TryDecodeAsNil() {
+ x.KubeconfigPath = ""
+ } else {
+ x.KubeconfigPath = string(r.DecodeString())
+ }
+ case "masqueradeAll":
+ if r.TryDecodeAsNil() {
+ x.MasqueradeAll = false
+ } else {
+ x.MasqueradeAll = bool(r.DecodeBool())
+ }
+ case "master":
+ if r.TryDecodeAsNil() {
+ x.Master = ""
+ } else {
+ x.Master = string(r.DecodeString())
+ }
+ case "oomScoreAdj":
+ if r.TryDecodeAsNil() {
+ if x.OOMScoreAdj != nil {
+ x.OOMScoreAdj = nil
+ }
+ } else {
+ if x.OOMScoreAdj == nil {
+ x.OOMScoreAdj = new(int32)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "mode":
+ if r.TryDecodeAsNil() {
+ x.Mode = ""
+ } else {
+ x.Mode = ProxyMode(r.DecodeString())
+ }
+ case "portRange":
+ if r.TryDecodeAsNil() {
+ x.PortRange = ""
+ } else {
+ x.PortRange = string(r.DecodeString())
+ }
+ case "kubeletCgroups":
+ if r.TryDecodeAsNil() {
+ x.ResourceContainer = ""
+ } else {
+ x.ResourceContainer = string(r.DecodeString())
+ }
+ case "udpTimeoutMilliseconds":
+ if r.TryDecodeAsNil() {
+ x.UDPIdleTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv21 := &x.UDPIdleTimeout
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv21) {
+ } else if !yym22 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv21)
+ } else {
+ z.DecFallback(yyv21, false)
+ }
+ }
+ case "conntrackMax":
+ if r.TryDecodeAsNil() {
+ x.ConntrackMax = 0
+ } else {
+ x.ConntrackMax = int32(r.DecodeInt(32))
+ }
+ case "conntrackTCPEstablishedTimeout":
+ if r.TryDecodeAsNil() {
+ x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv24 := &x.ConntrackTCPEstablishedTimeout
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv24) {
+ } else if !yym25 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv24)
+ } else {
+ z.DecFallback(yyv24, false)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
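+ // codecDecodeSelfFromArray decodes KubeProxyConfiguration fields positionally,
+ // in struct declaration order; surplus trailing elements are reported through
+ // DecStructFieldNotFound.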
+func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj28 int
+ var yyb28 bool
+ var yyhl28 bool = l >= 0
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.BindAddress = ""
+ } else {
+ x.BindAddress = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterCIDR = ""
+ } else {
+ x.ClusterCIDR = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HealthzBindAddress = ""
+ } else {
+ x.HealthzBindAddress = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HealthzPort = 0
+ } else {
+ x.HealthzPort = int32(r.DecodeInt(32))
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostnameOverride = ""
+ } else {
+ x.HostnameOverride = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.IPTablesMasqueradeBit != nil {
+ x.IPTablesMasqueradeBit = nil
+ }
+ } else {
+ if x.IPTablesMasqueradeBit == nil {
+ x.IPTablesMasqueradeBit = new(int32)
+ }
+ yym35 := z.DecBinary()
+ _ = yym35
+ if false {
+ } else {
+ *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IPTablesSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv36 := &x.IPTablesSyncPeriod
+ yym37 := z.DecBinary()
+ _ = yym37
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv36) {
+ } else if !yym37 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv36)
+ } else {
+ z.DecFallback(yyv36, false)
+ }
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeconfigPath = ""
+ } else {
+ x.KubeconfigPath = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MasqueradeAll = false
+ } else {
+ x.MasqueradeAll = bool(r.DecodeBool())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Master = ""
+ } else {
+ x.Master = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.OOMScoreAdj != nil {
+ x.OOMScoreAdj = nil
+ }
+ } else {
+ if x.OOMScoreAdj == nil {
+ x.OOMScoreAdj = new(int32)
+ }
+ yym42 := z.DecBinary()
+ _ = yym42
+ if false {
+ } else {
+ *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Mode = ""
+ } else {
+ x.Mode = ProxyMode(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PortRange = ""
+ } else {
+ x.PortRange = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceContainer = ""
+ } else {
+ x.ResourceContainer = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UDPIdleTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv46 := &x.UDPIdleTimeout
+ yym47 := z.DecBinary()
+ _ = yym47
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv46) {
+ } else if !yym47 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv46)
+ } else {
+ z.DecFallback(yyv46, false)
+ }
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConntrackMax = 0
+ } else {
+ x.ConntrackMax = int32(r.DecodeInt(32))
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv49 := &x.ConntrackTCPEstablishedTimeout
+ yym50 := z.DecBinary()
+ _ = yym50
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv49) {
+ } else if !yym50 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv49)
+ } else {
+ z.DecFallback(yyv49, false)
+ }
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj28++
+ if yyhl28 {
+ yyb28 = yyj28 > l
+ } else {
+ yyb28 = r.CheckBreak()
+ }
+ if yyb28 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj28-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
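+ // ProxyMode is encoded as its underlying string value.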
+func (x ProxyMode) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
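+ // ProxyMode is decoded from a plain string.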
+func (x *ProxyMode) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
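+ // HairpinMode is encoded as its underlying string value.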
+func (x HairpinMode) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
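+ // HairpinMode is decoded from a plain string.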
+func (x *HairpinMode) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
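+ // CodecEncodeSelf encodes a KubeletConfiguration either as an array (when the
+ // handle's StructToArray option is set) or as a map of JSON field names to values.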
+func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [91]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
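+ // yyq2 marks which optional fields hold non-default values; entries left
+ // false are encoded as empty/nil in array form and omitted in map form.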
+ yyq2[48] = x.CloudProvider != ""
+ yyq2[49] = x.CloudConfigFile != ""
+ yyq2[50] = x.KubeletCgroups != ""
+ yyq2[51] = x.RuntimeCgroups != ""
+ yyq2[52] = x.SystemCgroups != ""
+ yyq2[53] = x.CgroupRoot != ""
+ yyq2[55] = true
+ yyq2[56] = x.RktPath != ""
+ yyq2[57] = x.RktAPIEndpoint != ""
+ yyq2[58] = x.RktStage1Image != ""
+ yyq2[79] = true
+ yyq2[80] = x.NodeIP != ""
+ yyq2[84] = x.EvictionHard != ""
+ yyq2[85] = x.EvictionSoft != ""
+ yyq2[86] = x.EvictionSoftGracePeriod != ""
+ yyq2[87] = true
+ yyq2[88] = x.EvictionMaxPodGracePeriod != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(91)
+ } else {
+ yynn2 = 74
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Config))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("config"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Config))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.SyncFrequency
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("syncFrequency"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.SyncFrequency
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy12 := &x.FileCheckFrequency
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fileCheckFrequency"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.FileCheckFrequency
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy14) {
+ } else if !yym15 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy14)
+ } else {
+ z.EncFallback(yy14)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy17 := &x.HTTPCheckFrequency
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("httpCheckFrequency"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy19 := &x.HTTPCheckFrequency
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy19) {
+ } else if !yym20 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy19)
+ } else {
+ z.EncFallback(yy19)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURL))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("manifestURL"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURL))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURLHeader))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("manifestURLHeader"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURLHeader))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableServer))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableServer"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableServer))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("address"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym37 := z.EncBinary()
+ _ = yym37
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.ReadOnlyPort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnlyPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.ReadOnlyPort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym40 := z.EncBinary()
+ _ = yym40
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TLSCertFile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tLSCertFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TLSCertFile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TLSPrivateKeyFile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tLSPrivateKeyFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TLSPrivateKeyFile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym46 := z.EncBinary()
+ _ = yym46
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CertDirectory))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("certDirectory"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CertDirectory))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym49 := z.EncBinary()
+ _ = yym49
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym50 := z.EncBinary()
+ _ = yym50
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodInfraContainerImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podInfraContainerImage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym53 := z.EncBinary()
+ _ = yym53
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodInfraContainerImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym55 := z.EncBinary()
+ _ = yym55
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DockerEndpoint))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("dockerEndpoint"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym56 := z.EncBinary()
+ _ = yym56
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DockerEndpoint))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym58 := z.EncBinary()
+ _ = yym58
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RootDirectory))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rootDirectory"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym59 := z.EncBinary()
+ _ = yym59
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RootDirectory))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym61 := z.EncBinary()
+ _ = yym61
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seccompProfileRoot"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym62 := z.EncBinary()
+ _ = yym62
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym64 := z.EncBinary()
+ _ = yym64
+ if false {
+ } else {
+ r.EncodeBool(bool(x.AllowPrivileged))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allowPrivileged"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym65 := z.EncBinary()
+ _ = yym65
+ if false {
+ } else {
+ r.EncodeBool(bool(x.AllowPrivileged))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym67 := z.EncBinary()
+ _ = yym67
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostNetworkSources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym68 := z.EncBinary()
+ _ = yym68
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym70 := z.EncBinary()
+ _ = yym70
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPIDSources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym71 := z.EncBinary()
+ _ = yym71
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym73 := z.EncBinary()
+ _ = yym73
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIPCSources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym74 := z.EncBinary()
+ _ = yym74
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym76 := z.EncBinary()
+ _ = yym76
+ if false {
+ } else {
+ r.EncodeFloat64(float64(x.RegistryPullQPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("registryPullQPS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym77 := z.EncBinary()
+ _ = yym77
+ if false {
+ } else {
+ r.EncodeFloat64(float64(x.RegistryPullQPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym79 := z.EncBinary()
+ _ = yym79
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RegistryBurst))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("registryBurst"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym80 := z.EncBinary()
+ _ = yym80
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RegistryBurst))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym82 := z.EncBinary()
+ _ = yym82
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.EventRecordQPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("eventRecordQPS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym83 := z.EncBinary()
+ _ = yym83
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.EventRecordQPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym85 := z.EncBinary()
+ _ = yym85
+ if false {
+ } else {
+ r.EncodeInt(int64(x.EventBurst))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("eventBurst"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym86 := z.EncBinary()
+ _ = yym86
+ if false {
+ } else {
+ r.EncodeInt(int64(x.EventBurst))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym88 := z.EncBinary()
+ _ = yym88
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableDebuggingHandlers))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableDebuggingHandlers"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym89 := z.EncBinary()
+ _ = yym89
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableDebuggingHandlers))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy91 := &x.MinimumGCAge
+ yym92 := z.EncBinary()
+ _ = yym92
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy91) {
+ } else if !yym92 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy91)
+ } else {
+ z.EncFallback(yy91)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minimumGCAge"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy93 := &x.MinimumGCAge
+ yym94 := z.EncBinary()
+ _ = yym94
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy93) {
+ } else if !yym94 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy93)
+ } else {
+ z.EncFallback(yy93)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym96 := z.EncBinary()
+ _ = yym96
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxPerPodContainerCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxPerPodContainerCount"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym97 := z.EncBinary()
+ _ = yym97
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxPerPodContainerCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym99 := z.EncBinary()
+ _ = yym99
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxContainerCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxContainerCount"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym100 := z.EncBinary()
+ _ = yym100
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxContainerCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym102 := z.EncBinary()
+ _ = yym102
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.CAdvisorPort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cAdvisorPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym103 := z.EncBinary()
+ _ = yym103
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.CAdvisorPort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym105 := z.EncBinary()
+ _ = yym105
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HealthzPort))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("healthzPort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym106 := z.EncBinary()
+ _ = yym106
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HealthzPort))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym108 := z.EncBinary()
+ _ = yym108
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym109 := z.EncBinary()
+ _ = yym109
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym111 := z.EncBinary()
+ _ = yym111
+ if false {
+ } else {
+ r.EncodeInt(int64(x.OOMScoreAdj))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym112 := z.EncBinary()
+ _ = yym112
+ if false {
+ } else {
+ r.EncodeInt(int64(x.OOMScoreAdj))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym114 := z.EncBinary()
+ _ = yym114
+ if false {
+ } else {
+ r.EncodeBool(bool(x.RegisterNode))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("registerNode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym115 := z.EncBinary()
+ _ = yym115
+ if false {
+ } else {
+ r.EncodeBool(bool(x.RegisterNode))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym117 := z.EncBinary()
+ _ = yym117
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterDomain"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym118 := z.EncBinary()
+ _ = yym118
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym120 := z.EncBinary()
+ _ = yym120
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("masterServiceNamespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym121 := z.EncBinary()
+ _ = yym121
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym123 := z.EncBinary()
+ _ = yym123
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterDNS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym124 := z.EncBinary()
+ _ = yym124
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy126 := &x.StreamingConnectionIdleTimeout
+ yym127 := z.EncBinary()
+ _ = yym127
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy126) {
+ } else if !yym127 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy126)
+ } else {
+ z.EncFallback(yy126)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("streamingConnectionIdleTimeout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy128 := &x.StreamingConnectionIdleTimeout
+ yym129 := z.EncBinary()
+ _ = yym129
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy128) {
+ } else if !yym129 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy128)
+ } else {
+ z.EncFallback(yy128)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy131 := &x.NodeStatusUpdateFrequency
+ yym132 := z.EncBinary()
+ _ = yym132
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy131) {
+ } else if !yym132 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy131)
+ } else {
+ z.EncFallback(yy131)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeStatusUpdateFrequency"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy133 := &x.NodeStatusUpdateFrequency
+ yym134 := z.EncBinary()
+ _ = yym134
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy133) {
+ } else if !yym134 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy133)
+ } else {
+ z.EncFallback(yy133)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy136 := &x.ImageMinimumGCAge
+ yym137 := z.EncBinary()
+ _ = yym137
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy136) {
+ } else if !yym137 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy136)
+ } else {
+ z.EncFallback(yy136)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imageMinimumGCAge"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy138 := &x.ImageMinimumGCAge
+ yym139 := z.EncBinary()
+ _ = yym139
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy138) {
+ } else if !yym139 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy138)
+ } else {
+ z.EncFallback(yy138)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym141 := z.EncBinary()
+ _ = yym141
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ImageGCHighThresholdPercent))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imageGCHighThresholdPercent"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym142 := z.EncBinary()
+ _ = yym142
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ImageGCHighThresholdPercent))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym144 := z.EncBinary()
+ _ = yym144
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ImageGCLowThresholdPercent))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("imageGCLowThresholdPercent"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym145 := z.EncBinary()
+ _ = yym145
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ImageGCLowThresholdPercent))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym147 := z.EncBinary()
+ _ = yym147
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LowDiskSpaceThresholdMB))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lowDiskSpaceThresholdMB"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym148 := z.EncBinary()
+ _ = yym148
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LowDiskSpaceThresholdMB))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy150 := &x.VolumeStatsAggPeriod
+ yym151 := z.EncBinary()
+ _ = yym151
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy150) {
+ } else if !yym151 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy150)
+ } else {
+ z.EncFallback(yy150)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeStatsAggPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy152 := &x.VolumeStatsAggPeriod
+ yym153 := z.EncBinary()
+ _ = yym153
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy152) {
+ } else if !yym153 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy152)
+ } else {
+ z.EncFallback(yy152)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym155 := z.EncBinary()
+ _ = yym155
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("networkPluginName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym156 := z.EncBinary()
+ _ = yym156
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym158 := z.EncBinary()
+ _ = yym158
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("networkPluginDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym159 := z.EncBinary()
+ _ = yym159
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym161 := z.EncBinary()
+ _ = yym161
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumePluginDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym162 := z.EncBinary()
+ _ = yym162
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[48] {
+ yym164 := z.EncBinary()
+ _ = yym164
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[48] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cloudProvider"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym165 := z.EncBinary()
+ _ = yym165
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[49] {
+ yym167 := z.EncBinary()
+ _ = yym167
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[49] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym168 := z.EncBinary()
+ _ = yym168
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[50] {
+ yym170 := z.EncBinary()
+ _ = yym170
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[50] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym171 := z.EncBinary()
+ _ = yym171
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[51] {
+ yym173 := z.EncBinary()
+ _ = yym173
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[51] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runtimeCgroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym174 := z.EncBinary()
+ _ = yym174
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[52] {
+ yym176 := z.EncBinary()
+ _ = yym176
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[52] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("systemContainer"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym177 := z.EncBinary()
+ _ = yym177
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[53] {
+ yym179 := z.EncBinary()
+ _ = yym179
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[53] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cgroupRoot"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym180 := z.EncBinary()
+ _ = yym180
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym182 := z.EncBinary()
+ _ = yym182
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerRuntime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym183 := z.EncBinary()
+ _ = yym183
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[55] {
+ yy185 := &x.RuntimeRequestTimeout
+ yym186 := z.EncBinary()
+ _ = yym186
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy185) {
+ } else if !yym186 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy185)
+ } else {
+ z.EncFallback(yy185)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[55] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runtimeRequestTimeout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy187 := &x.RuntimeRequestTimeout
+ yym188 := z.EncBinary()
+ _ = yym188
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy187) {
+ } else if !yym188 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy187)
+ } else {
+ z.EncFallback(yy187)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[56] {
+ yym190 := z.EncBinary()
+ _ = yym190
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RktPath))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[56] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rktPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym191 := z.EncBinary()
+ _ = yym191
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RktPath))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[57] {
+ yym193 := z.EncBinary()
+ _ = yym193
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[57] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rktAPIEndpoint"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym194 := z.EncBinary()
+ _ = yym194
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[58] {
+ yym196 := z.EncBinary()
+ _ = yym196
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[58] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rktStage1Image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym197 := z.EncBinary()
+ _ = yym197
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym199 := z.EncBinary()
+ _ = yym199
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lockFilePath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym200 := z.EncBinary()
+ _ = yym200
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym202 := z.EncBinary()
+ _ = yym202
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ExitOnLockContention))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exitOnLockContention"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym203 := z.EncBinary()
+ _ = yym203
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ExitOnLockContention))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym205 := z.EncBinary()
+ _ = yym205
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ConfigureCBR0))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configureCbr0"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym206 := z.EncBinary()
+ _ = yym206
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ConfigureCBR0))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym208 := z.EncBinary()
+ _ = yym208
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hairpinMode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym209 := z.EncBinary()
+ _ = yym209
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym211 := z.EncBinary()
+ _ = yym211
+ if false {
+ } else {
+ r.EncodeBool(bool(x.BabysitDaemons))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("babysitDaemons"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym212 := z.EncBinary()
+ _ = yym212
+ if false {
+ } else {
+ r.EncodeBool(bool(x.BabysitDaemons))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym214 := z.EncBinary()
+ _ = yym214
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxPods))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxPods"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym215 := z.EncBinary()
+ _ = yym215
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxPods))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym217 := z.EncBinary()
+ _ = yym217
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NvidiaGPUs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nvidiaGPUs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym218 := z.EncBinary()
+ _ = yym218
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NvidiaGPUs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym220 := z.EncBinary()
+ _ = yym220
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("dockerExecHandlerName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym221 := z.EncBinary()
+ _ = yym221
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym223 := z.EncBinary()
+ _ = yym223
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym224 := z.EncBinary()
+ _ = yym224
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym226 := z.EncBinary()
+ _ = yym226
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resolvConf"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym227 := z.EncBinary()
+ _ = yym227
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym229 := z.EncBinary()
+ _ = yym229
+ if false {
+ } else {
+ r.EncodeBool(bool(x.CPUCFSQuota))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cpuCFSQuota"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym230 := z.EncBinary()
+ _ = yym230
+ if false {
+ } else {
+ r.EncodeBool(bool(x.CPUCFSQuota))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym232 := z.EncBinary()
+ _ = yym232
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Containerized))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("containerized"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym233 := z.EncBinary()
+ _ = yym233
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Containerized))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym235 := z.EncBinary()
+ _ = yym235
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.MaxOpenFiles))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxOpenFiles"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym236 := z.EncBinary()
+ _ = yym236
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.MaxOpenFiles))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym238 := z.EncBinary()
+ _ = yym238
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReconcileCIDR))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reconcileCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym239 := z.EncBinary()
+ _ = yym239
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReconcileCIDR))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym241 := z.EncBinary()
+ _ = yym241
+ if false {
+ } else {
+ r.EncodeBool(bool(x.RegisterSchedulable))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("registerSchedulable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym242 := z.EncBinary()
+ _ = yym242
+ if false {
+ } else {
+ r.EncodeBool(bool(x.RegisterSchedulable))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym244 := z.EncBinary()
+ _ = yym244
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContentType))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("contentType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym245 := z.EncBinary()
+ _ = yym245
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContentType))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym247 := z.EncBinary()
+ _ = yym247
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.KubeAPIQPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym248 := z.EncBinary()
+ _ = yym248
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.KubeAPIQPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym250 := z.EncBinary()
+ _ = yym250
+ if false {
+ } else {
+ r.EncodeInt(int64(x.KubeAPIBurst))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym251 := z.EncBinary()
+ _ = yym251
+ if false {
+ } else {
+ r.EncodeInt(int64(x.KubeAPIBurst))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym253 := z.EncBinary()
+ _ = yym253
+ if false {
+ } else {
+ r.EncodeBool(bool(x.SerializeImagePulls))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serializeImagePulls"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym254 := z.EncBinary()
+ _ = yym254
+ if false {
+ } else {
+ r.EncodeBool(bool(x.SerializeImagePulls))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym256 := z.EncBinary()
+ _ = yym256
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ExperimentalFlannelOverlay))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("experimentalFlannelOverlay"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym257 := z.EncBinary()
+ _ = yym257
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ExperimentalFlannelOverlay))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[79] {
+ yy259 := &x.OutOfDiskTransitionFrequency
+ yym260 := z.EncBinary()
+ _ = yym260
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy259) {
+ } else if !yym260 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy259)
+ } else {
+ z.EncFallback(yy259)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[79] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("outOfDiskTransitionFrequency"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy261 := &x.OutOfDiskTransitionFrequency
+ yym262 := z.EncBinary()
+ _ = yym262
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy261) {
+ } else if !yym262 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy261)
+ } else {
+ z.EncFallback(yy261)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[80] {
+ yym264 := z.EncBinary()
+ _ = yym264
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[80] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeIP"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym265 := z.EncBinary()
+ _ = yym265
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.NodeLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym267 := z.EncBinary()
+ _ = yym267
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.NodeLabels, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeLabels"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NodeLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym268 := z.EncBinary()
+ _ = yym268
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.NodeLabels, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym270 := z.EncBinary()
+ _ = yym270
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nonMasqueradeCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym271 := z.EncBinary()
+ _ = yym271
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym273 := z.EncBinary()
+ _ = yym273
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableCustomMetrics))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableCustomMetrics"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym274 := z.EncBinary()
+ _ = yym274
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableCustomMetrics))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[84] {
+ yym276 := z.EncBinary()
+ _ = yym276
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[84] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("evictionHard"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym277 := z.EncBinary()
+ _ = yym277
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[85] {
+ yym279 := z.EncBinary()
+ _ = yym279
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[85] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("evictionSoft"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym280 := z.EncBinary()
+ _ = yym280
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[86] {
+ yym282 := z.EncBinary()
+ _ = yym282
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[86] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("evictionSoftGracePeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym283 := z.EncBinary()
+ _ = yym283
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[87] {
+ yy285 := &x.EvictionPressureTransitionPeriod
+ yym286 := z.EncBinary()
+ _ = yym286
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy285) {
+ } else if !yym286 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy285)
+ } else {
+ z.EncFallback(yy285)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[87] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("evictionPressureTransitionPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy287 := &x.EvictionPressureTransitionPeriod
+ yym288 := z.EncBinary()
+ _ = yym288
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy287) {
+ } else if !yym288 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy287)
+ } else {
+ z.EncFallback(yy287)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[88] {
+ yym290 := z.EncBinary()
+ _ = yym290
+ if false {
+ } else {
+ r.EncodeInt(int64(x.EvictionMaxPodGracePeriod))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[88] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("evictionMaxPodGracePeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym291 := z.EncBinary()
+ _ = yym291
+ if false {
+ } else {
+ r.EncodeInt(int64(x.EvictionMaxPodGracePeriod))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym293 := z.EncBinary()
+ _ = yym293
+ if false {
+ } else {
+ r.EncodeInt(int64(x.PodsPerCore))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podsPerCore"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym294 := z.EncBinary()
+ _ = yym294
+ if false {
+ } else {
+ r.EncodeInt(int64(x.PodsPerCore))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym296 := z.EncBinary()
+ _ = yym296
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableControllerAttachDetach))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableControllerAttachDetach"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym297 := z.EncBinary()
+ _ = yym297
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableControllerAttachDetach))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
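+ // CodecDecodeSelf decodes a KubeletConfiguration from the stream. A registered
+ // codec extension, if any, takes precedence; otherwise the value may arrive
+ // either as a map keyed by field name or as a positional array, and the
+ // matching generated helper (codecDecodeSelfFromMap or codecDecodeSelfFromArray)
+ // is chosen from the container type. Any other container type is rejected.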
+func (x *KubeletConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
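+ // codecDecodeSelfFromMap decodes the map form: each key is matched against the
+ // JSON field names of KubeletConfiguration and the corresponding field is set,
+ // with nil values resetting the field to its zero value; keys that do not match
+ // any field are reported through DecStructFieldNotFound.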
+func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "config":
+ if r.TryDecodeAsNil() {
+ x.Config = ""
+ } else {
+ x.Config = string(r.DecodeString())
+ }
+ case "syncFrequency":
+ if r.TryDecodeAsNil() {
+ x.SyncFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv5 := &x.SyncFrequency
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ case "fileCheckFrequency":
+ if r.TryDecodeAsNil() {
+ x.FileCheckFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv7 := &x.FileCheckFrequency
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ case "httpCheckFrequency":
+ if r.TryDecodeAsNil() {
+ x.HTTPCheckFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv9 := &x.HTTPCheckFrequency
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ case "manifestURL":
+ if r.TryDecodeAsNil() {
+ x.ManifestURL = ""
+ } else {
+ x.ManifestURL = string(r.DecodeString())
+ }
+ case "manifestURLHeader":
+ if r.TryDecodeAsNil() {
+ x.ManifestURLHeader = ""
+ } else {
+ x.ManifestURLHeader = string(r.DecodeString())
+ }
+ case "enableServer":
+ if r.TryDecodeAsNil() {
+ x.EnableServer = false
+ } else {
+ x.EnableServer = bool(r.DecodeBool())
+ }
+ case "address":
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = uint(r.DecodeUint(codecSelferBitsize1234))
+ }
+ case "readOnlyPort":
+ if r.TryDecodeAsNil() {
+ x.ReadOnlyPort = 0
+ } else {
+ x.ReadOnlyPort = uint(r.DecodeUint(codecSelferBitsize1234))
+ }
+ case "tLSCertFile":
+ if r.TryDecodeAsNil() {
+ x.TLSCertFile = ""
+ } else {
+ x.TLSCertFile = string(r.DecodeString())
+ }
+ case "tLSPrivateKeyFile":
+ if r.TryDecodeAsNil() {
+ x.TLSPrivateKeyFile = ""
+ } else {
+ x.TLSPrivateKeyFile = string(r.DecodeString())
+ }
+ case "certDirectory":
+ if r.TryDecodeAsNil() {
+ x.CertDirectory = ""
+ } else {
+ x.CertDirectory = string(r.DecodeString())
+ }
+ case "hostnameOverride":
+ if r.TryDecodeAsNil() {
+ x.HostnameOverride = ""
+ } else {
+ x.HostnameOverride = string(r.DecodeString())
+ }
+ case "podInfraContainerImage":
+ if r.TryDecodeAsNil() {
+ x.PodInfraContainerImage = ""
+ } else {
+ x.PodInfraContainerImage = string(r.DecodeString())
+ }
+ case "dockerEndpoint":
+ if r.TryDecodeAsNil() {
+ x.DockerEndpoint = ""
+ } else {
+ x.DockerEndpoint = string(r.DecodeString())
+ }
+ case "rootDirectory":
+ if r.TryDecodeAsNil() {
+ x.RootDirectory = ""
+ } else {
+ x.RootDirectory = string(r.DecodeString())
+ }
+ case "seccompProfileRoot":
+ if r.TryDecodeAsNil() {
+ x.SeccompProfileRoot = ""
+ } else {
+ x.SeccompProfileRoot = string(r.DecodeString())
+ }
+ case "allowPrivileged":
+ if r.TryDecodeAsNil() {
+ x.AllowPrivileged = false
+ } else {
+ x.AllowPrivileged = bool(r.DecodeBool())
+ }
+ case "hostNetworkSources":
+ if r.TryDecodeAsNil() {
+ x.HostNetworkSources = ""
+ } else {
+ x.HostNetworkSources = string(r.DecodeString())
+ }
+ case "hostPIDSources":
+ if r.TryDecodeAsNil() {
+ x.HostPIDSources = ""
+ } else {
+ x.HostPIDSources = string(r.DecodeString())
+ }
+ case "hostIPCSources":
+ if r.TryDecodeAsNil() {
+ x.HostIPCSources = ""
+ } else {
+ x.HostIPCSources = string(r.DecodeString())
+ }
+ case "registryPullQPS":
+ if r.TryDecodeAsNil() {
+ x.RegistryPullQPS = 0
+ } else {
+ x.RegistryPullQPS = float64(r.DecodeFloat(false))
+ }
+ case "registryBurst":
+ if r.TryDecodeAsNil() {
+ x.RegistryBurst = 0
+ } else {
+ x.RegistryBurst = int32(r.DecodeInt(32))
+ }
+ case "eventRecordQPS":
+ if r.TryDecodeAsNil() {
+ x.EventRecordQPS = 0
+ } else {
+ x.EventRecordQPS = float32(r.DecodeFloat(true))
+ }
+ case "eventBurst":
+ if r.TryDecodeAsNil() {
+ x.EventBurst = 0
+ } else {
+ x.EventBurst = int32(r.DecodeInt(32))
+ }
+ case "enableDebuggingHandlers":
+ if r.TryDecodeAsNil() {
+ x.EnableDebuggingHandlers = false
+ } else {
+ x.EnableDebuggingHandlers = bool(r.DecodeBool())
+ }
+ case "minimumGCAge":
+ if r.TryDecodeAsNil() {
+ x.MinimumGCAge = pkg1_unversioned.Duration{}
+ } else {
+ yyv34 := &x.MinimumGCAge
+ yym35 := z.DecBinary()
+ _ = yym35
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv34) {
+ } else if !yym35 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv34)
+ } else {
+ z.DecFallback(yyv34, false)
+ }
+ }
+ case "maxPerPodContainerCount":
+ if r.TryDecodeAsNil() {
+ x.MaxPerPodContainerCount = 0
+ } else {
+ x.MaxPerPodContainerCount = int32(r.DecodeInt(32))
+ }
+ case "maxContainerCount":
+ if r.TryDecodeAsNil() {
+ x.MaxContainerCount = 0
+ } else {
+ x.MaxContainerCount = int32(r.DecodeInt(32))
+ }
+ case "cAdvisorPort":
+ if r.TryDecodeAsNil() {
+ x.CAdvisorPort = 0
+ } else {
+ x.CAdvisorPort = uint(r.DecodeUint(codecSelferBitsize1234))
+ }
+ case "healthzPort":
+ if r.TryDecodeAsNil() {
+ x.HealthzPort = 0
+ } else {
+ x.HealthzPort = int32(r.DecodeInt(32))
+ }
+ case "healthzBindAddress":
+ if r.TryDecodeAsNil() {
+ x.HealthzBindAddress = ""
+ } else {
+ x.HealthzBindAddress = string(r.DecodeString())
+ }
+ case "oomScoreAdj":
+ if r.TryDecodeAsNil() {
+ x.OOMScoreAdj = 0
+ } else {
+ x.OOMScoreAdj = int32(r.DecodeInt(32))
+ }
+ case "registerNode":
+ if r.TryDecodeAsNil() {
+ x.RegisterNode = false
+ } else {
+ x.RegisterNode = bool(r.DecodeBool())
+ }
+ case "clusterDomain":
+ if r.TryDecodeAsNil() {
+ x.ClusterDomain = ""
+ } else {
+ x.ClusterDomain = string(r.DecodeString())
+ }
+ case "masterServiceNamespace":
+ if r.TryDecodeAsNil() {
+ x.MasterServiceNamespace = ""
+ } else {
+ x.MasterServiceNamespace = string(r.DecodeString())
+ }
+ case "clusterDNS":
+ if r.TryDecodeAsNil() {
+ x.ClusterDNS = ""
+ } else {
+ x.ClusterDNS = string(r.DecodeString())
+ }
+ case "streamingConnectionIdleTimeout":
+ if r.TryDecodeAsNil() {
+ x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv46 := &x.StreamingConnectionIdleTimeout
+ yym47 := z.DecBinary()
+ _ = yym47
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv46) {
+ } else if !yym47 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv46)
+ } else {
+ z.DecFallback(yyv46, false)
+ }
+ }
+ case "nodeStatusUpdateFrequency":
+ if r.TryDecodeAsNil() {
+ x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv48 := &x.NodeStatusUpdateFrequency
+ yym49 := z.DecBinary()
+ _ = yym49
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv48) {
+ } else if !yym49 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv48)
+ } else {
+ z.DecFallback(yyv48, false)
+ }
+ }
+ case "imageMinimumGCAge":
+ if r.TryDecodeAsNil() {
+ x.ImageMinimumGCAge = pkg1_unversioned.Duration{}
+ } else {
+ yyv50 := &x.ImageMinimumGCAge
+ yym51 := z.DecBinary()
+ _ = yym51
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv50) {
+ } else if !yym51 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv50)
+ } else {
+ z.DecFallback(yyv50, false)
+ }
+ }
+ case "imageGCHighThresholdPercent":
+ if r.TryDecodeAsNil() {
+ x.ImageGCHighThresholdPercent = 0
+ } else {
+ x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32))
+ }
+ case "imageGCLowThresholdPercent":
+ if r.TryDecodeAsNil() {
+ x.ImageGCLowThresholdPercent = 0
+ } else {
+ x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32))
+ }
+ case "lowDiskSpaceThresholdMB":
+ if r.TryDecodeAsNil() {
+ x.LowDiskSpaceThresholdMB = 0
+ } else {
+ x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32))
+ }
+ case "volumeStatsAggPeriod":
+ if r.TryDecodeAsNil() {
+ x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv55 := &x.VolumeStatsAggPeriod
+ yym56 := z.DecBinary()
+ _ = yym56
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv55) {
+ } else if !yym56 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv55)
+ } else {
+ z.DecFallback(yyv55, false)
+ }
+ }
+ case "networkPluginName":
+ if r.TryDecodeAsNil() {
+ x.NetworkPluginName = ""
+ } else {
+ x.NetworkPluginName = string(r.DecodeString())
+ }
+ case "networkPluginDir":
+ if r.TryDecodeAsNil() {
+ x.NetworkPluginDir = ""
+ } else {
+ x.NetworkPluginDir = string(r.DecodeString())
+ }
+ case "volumePluginDir":
+ if r.TryDecodeAsNil() {
+ x.VolumePluginDir = ""
+ } else {
+ x.VolumePluginDir = string(r.DecodeString())
+ }
+ case "cloudProvider":
+ if r.TryDecodeAsNil() {
+ x.CloudProvider = ""
+ } else {
+ x.CloudProvider = string(r.DecodeString())
+ }
+ case "cloudConfigFile":
+ if r.TryDecodeAsNil() {
+ x.CloudConfigFile = ""
+ } else {
+ x.CloudConfigFile = string(r.DecodeString())
+ }
+ case "kubeletCgroups":
+ if r.TryDecodeAsNil() {
+ x.KubeletCgroups = ""
+ } else {
+ x.KubeletCgroups = string(r.DecodeString())
+ }
+ case "runtimeCgroups":
+ if r.TryDecodeAsNil() {
+ x.RuntimeCgroups = ""
+ } else {
+ x.RuntimeCgroups = string(r.DecodeString())
+ }
+ case "systemContainer":
+ if r.TryDecodeAsNil() {
+ x.SystemCgroups = ""
+ } else {
+ x.SystemCgroups = string(r.DecodeString())
+ }
+ case "cgroupRoot":
+ if r.TryDecodeAsNil() {
+ x.CgroupRoot = ""
+ } else {
+ x.CgroupRoot = string(r.DecodeString())
+ }
+ case "containerRuntime":
+ if r.TryDecodeAsNil() {
+ x.ContainerRuntime = ""
+ } else {
+ x.ContainerRuntime = string(r.DecodeString())
+ }
+ case "runtimeRequestTimeout":
+ if r.TryDecodeAsNil() {
+ x.RuntimeRequestTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv67 := &x.RuntimeRequestTimeout
+ yym68 := z.DecBinary()
+ _ = yym68
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv67) {
+ } else if !yym68 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv67)
+ } else {
+ z.DecFallback(yyv67, false)
+ }
+ }
+ case "rktPath":
+ if r.TryDecodeAsNil() {
+ x.RktPath = ""
+ } else {
+ x.RktPath = string(r.DecodeString())
+ }
+ case "rktAPIEndpoint":
+ if r.TryDecodeAsNil() {
+ x.RktAPIEndpoint = ""
+ } else {
+ x.RktAPIEndpoint = string(r.DecodeString())
+ }
+ case "rktStage1Image":
+ if r.TryDecodeAsNil() {
+ x.RktStage1Image = ""
+ } else {
+ x.RktStage1Image = string(r.DecodeString())
+ }
+ case "lockFilePath":
+ if r.TryDecodeAsNil() {
+ x.LockFilePath = ""
+ } else {
+ x.LockFilePath = string(r.DecodeString())
+ }
+ case "exitOnLockContention":
+ if r.TryDecodeAsNil() {
+ x.ExitOnLockContention = false
+ } else {
+ x.ExitOnLockContention = bool(r.DecodeBool())
+ }
+ case "configureCbr0":
+ if r.TryDecodeAsNil() {
+ x.ConfigureCBR0 = false
+ } else {
+ x.ConfigureCBR0 = bool(r.DecodeBool())
+ }
+ case "hairpinMode":
+ if r.TryDecodeAsNil() {
+ x.HairpinMode = ""
+ } else {
+ x.HairpinMode = string(r.DecodeString())
+ }
+ case "babysitDaemons":
+ if r.TryDecodeAsNil() {
+ x.BabysitDaemons = false
+ } else {
+ x.BabysitDaemons = bool(r.DecodeBool())
+ }
+ case "maxPods":
+ if r.TryDecodeAsNil() {
+ x.MaxPods = 0
+ } else {
+ x.MaxPods = int32(r.DecodeInt(32))
+ }
+ case "nvidiaGPUs":
+ if r.TryDecodeAsNil() {
+ x.NvidiaGPUs = 0
+ } else {
+ x.NvidiaGPUs = int32(r.DecodeInt(32))
+ }
+ case "dockerExecHandlerName":
+ if r.TryDecodeAsNil() {
+ x.DockerExecHandlerName = ""
+ } else {
+ x.DockerExecHandlerName = string(r.DecodeString())
+ }
+ case "podCIDR":
+ if r.TryDecodeAsNil() {
+ x.PodCIDR = ""
+ } else {
+ x.PodCIDR = string(r.DecodeString())
+ }
+ case "resolvConf":
+ if r.TryDecodeAsNil() {
+ x.ResolverConfig = ""
+ } else {
+ x.ResolverConfig = string(r.DecodeString())
+ }
+ case "cpuCFSQuota":
+ if r.TryDecodeAsNil() {
+ x.CPUCFSQuota = false
+ } else {
+ x.CPUCFSQuota = bool(r.DecodeBool())
+ }
+ case "containerized":
+ if r.TryDecodeAsNil() {
+ x.Containerized = false
+ } else {
+ x.Containerized = bool(r.DecodeBool())
+ }
+ case "maxOpenFiles":
+ if r.TryDecodeAsNil() {
+ x.MaxOpenFiles = 0
+ } else {
+ x.MaxOpenFiles = uint64(r.DecodeUint(64))
+ }
+ case "reconcileCIDR":
+ if r.TryDecodeAsNil() {
+ x.ReconcileCIDR = false
+ } else {
+ x.ReconcileCIDR = bool(r.DecodeBool())
+ }
+ case "registerSchedulable":
+ if r.TryDecodeAsNil() {
+ x.RegisterSchedulable = false
+ } else {
+ x.RegisterSchedulable = bool(r.DecodeBool())
+ }
+ case "contentType":
+ if r.TryDecodeAsNil() {
+ x.ContentType = ""
+ } else {
+ x.ContentType = string(r.DecodeString())
+ }
+ case "kubeAPIQPS":
+ if r.TryDecodeAsNil() {
+ x.KubeAPIQPS = 0
+ } else {
+ x.KubeAPIQPS = float32(r.DecodeFloat(true))
+ }
+ case "kubeAPIBurst":
+ if r.TryDecodeAsNil() {
+ x.KubeAPIBurst = 0
+ } else {
+ x.KubeAPIBurst = int32(r.DecodeInt(32))
+ }
+ case "serializeImagePulls":
+ if r.TryDecodeAsNil() {
+ x.SerializeImagePulls = false
+ } else {
+ x.SerializeImagePulls = bool(r.DecodeBool())
+ }
+ case "experimentalFlannelOverlay":
+ if r.TryDecodeAsNil() {
+ x.ExperimentalFlannelOverlay = false
+ } else {
+ x.ExperimentalFlannelOverlay = bool(r.DecodeBool())
+ }
+ case "outOfDiskTransitionFrequency":
+ if r.TryDecodeAsNil() {
+ x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv92 := &x.OutOfDiskTransitionFrequency
+ yym93 := z.DecBinary()
+ _ = yym93
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv92) {
+ } else if !yym93 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv92)
+ } else {
+ z.DecFallback(yyv92, false)
+ }
+ }
+ case "nodeIP":
+ if r.TryDecodeAsNil() {
+ x.NodeIP = ""
+ } else {
+ x.NodeIP = string(r.DecodeString())
+ }
+ case "nodeLabels":
+ if r.TryDecodeAsNil() {
+ x.NodeLabels = nil
+ } else {
+ yyv95 := &x.NodeLabels
+ yym96 := z.DecBinary()
+ _ = yym96
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv95, false, d)
+ }
+ }
+ case "nonMasqueradeCIDR":
+ if r.TryDecodeAsNil() {
+ x.NonMasqueradeCIDR = ""
+ } else {
+ x.NonMasqueradeCIDR = string(r.DecodeString())
+ }
+ case "enableCustomMetrics":
+ if r.TryDecodeAsNil() {
+ x.EnableCustomMetrics = false
+ } else {
+ x.EnableCustomMetrics = bool(r.DecodeBool())
+ }
+ case "evictionHard":
+ if r.TryDecodeAsNil() {
+ x.EvictionHard = ""
+ } else {
+ x.EvictionHard = string(r.DecodeString())
+ }
+ case "evictionSoft":
+ if r.TryDecodeAsNil() {
+ x.EvictionSoft = ""
+ } else {
+ x.EvictionSoft = string(r.DecodeString())
+ }
+ case "evictionSoftGracePeriod":
+ if r.TryDecodeAsNil() {
+ x.EvictionSoftGracePeriod = ""
+ } else {
+ x.EvictionSoftGracePeriod = string(r.DecodeString())
+ }
+ case "evictionPressureTransitionPeriod":
+ if r.TryDecodeAsNil() {
+ x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv102 := &x.EvictionPressureTransitionPeriod
+ yym103 := z.DecBinary()
+ _ = yym103
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv102) {
+ } else if !yym103 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv102)
+ } else {
+ z.DecFallback(yyv102, false)
+ }
+ }
+ case "evictionMaxPodGracePeriod":
+ if r.TryDecodeAsNil() {
+ x.EvictionMaxPodGracePeriod = 0
+ } else {
+ x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32))
+ }
+ case "podsPerCore":
+ if r.TryDecodeAsNil() {
+ x.PodsPerCore = 0
+ } else {
+ x.PodsPerCore = int32(r.DecodeInt(32))
+ }
+ case "enableControllerAttachDetach":
+ if r.TryDecodeAsNil() {
+ x.EnableControllerAttachDetach = false
+ } else {
+ x.EnableControllerAttachDetach = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
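+ // codecDecodeSelfFromArray decodes the positional (array) form: fields are read
+ // in declaration order, and decoding stops early once the declared length l is
+ // exhausted (or a break marker is seen for indefinite-length arrays).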
+func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj107 int
+ var yyb107 bool
+ var yyhl107 bool = l >= 0
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Config = ""
+ } else {
+ x.Config = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SyncFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv109 := &x.SyncFrequency
+ yym110 := z.DecBinary()
+ _ = yym110
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv109) {
+ } else if !yym110 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv109)
+ } else {
+ z.DecFallback(yyv109, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FileCheckFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv111 := &x.FileCheckFrequency
+ yym112 := z.DecBinary()
+ _ = yym112
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv111) {
+ } else if !yym112 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv111)
+ } else {
+ z.DecFallback(yyv111, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HTTPCheckFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv113 := &x.HTTPCheckFrequency
+ yym114 := z.DecBinary()
+ _ = yym114
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv113) {
+ } else if !yym114 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv113)
+ } else {
+ z.DecFallback(yyv113, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ManifestURL = ""
+ } else {
+ x.ManifestURL = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ManifestURLHeader = ""
+ } else {
+ x.ManifestURLHeader = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableServer = false
+ } else {
+ x.EnableServer = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = uint(r.DecodeUint(codecSelferBitsize1234))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnlyPort = 0
+ } else {
+ x.ReadOnlyPort = uint(r.DecodeUint(codecSelferBitsize1234))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TLSCertFile = ""
+ } else {
+ x.TLSCertFile = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TLSPrivateKeyFile = ""
+ } else {
+ x.TLSPrivateKeyFile = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CertDirectory = ""
+ } else {
+ x.CertDirectory = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostnameOverride = ""
+ } else {
+ x.HostnameOverride = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodInfraContainerImage = ""
+ } else {
+ x.PodInfraContainerImage = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DockerEndpoint = ""
+ } else {
+ x.DockerEndpoint = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RootDirectory = ""
+ } else {
+ x.RootDirectory = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SeccompProfileRoot = ""
+ } else {
+ x.SeccompProfileRoot = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AllowPrivileged = false
+ } else {
+ x.AllowPrivileged = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostNetworkSources = ""
+ } else {
+ x.HostNetworkSources = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPIDSources = ""
+ } else {
+ x.HostPIDSources = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIPCSources = ""
+ } else {
+ x.HostIPCSources = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RegistryPullQPS = 0
+ } else {
+ x.RegistryPullQPS = float64(r.DecodeFloat(false))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RegistryBurst = 0
+ } else {
+ x.RegistryBurst = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EventRecordQPS = 0
+ } else {
+ x.EventRecordQPS = float32(r.DecodeFloat(true))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EventBurst = 0
+ } else {
+ x.EventBurst = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableDebuggingHandlers = false
+ } else {
+ x.EnableDebuggingHandlers = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinimumGCAge = pkg1_unversioned.Duration{}
+ } else {
+ yyv138 := &x.MinimumGCAge
+ yym139 := z.DecBinary()
+ _ = yym139
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv138) {
+ } else if !yym139 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv138)
+ } else {
+ z.DecFallback(yyv138, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxPerPodContainerCount = 0
+ } else {
+ x.MaxPerPodContainerCount = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxContainerCount = 0
+ } else {
+ x.MaxContainerCount = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CAdvisorPort = 0
+ } else {
+ x.CAdvisorPort = uint(r.DecodeUint(codecSelferBitsize1234))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HealthzPort = 0
+ } else {
+ x.HealthzPort = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HealthzBindAddress = ""
+ } else {
+ x.HealthzBindAddress = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OOMScoreAdj = 0
+ } else {
+ x.OOMScoreAdj = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RegisterNode = false
+ } else {
+ x.RegisterNode = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterDomain = ""
+ } else {
+ x.ClusterDomain = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MasterServiceNamespace = ""
+ } else {
+ x.MasterServiceNamespace = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterDNS = ""
+ } else {
+ x.ClusterDNS = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv150 := &x.StreamingConnectionIdleTimeout
+ yym151 := z.DecBinary()
+ _ = yym151
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv150) {
+ } else if !yym151 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv150)
+ } else {
+ z.DecFallback(yyv150, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv152 := &x.NodeStatusUpdateFrequency
+ yym153 := z.DecBinary()
+ _ = yym153
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv152) {
+ } else if !yym153 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv152)
+ } else {
+ z.DecFallback(yyv152, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImageMinimumGCAge = pkg1_unversioned.Duration{}
+ } else {
+ yyv154 := &x.ImageMinimumGCAge
+ yym155 := z.DecBinary()
+ _ = yym155
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv154) {
+ } else if !yym155 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv154)
+ } else {
+ z.DecFallback(yyv154, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImageGCHighThresholdPercent = 0
+ } else {
+ x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ImageGCLowThresholdPercent = 0
+ } else {
+ x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LowDiskSpaceThresholdMB = 0
+ } else {
+ x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv159 := &x.VolumeStatsAggPeriod
+ yym160 := z.DecBinary()
+ _ = yym160
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv159) {
+ } else if !yym160 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv159)
+ } else {
+ z.DecFallback(yyv159, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NetworkPluginName = ""
+ } else {
+ x.NetworkPluginName = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NetworkPluginDir = ""
+ } else {
+ x.NetworkPluginDir = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumePluginDir = ""
+ } else {
+ x.VolumePluginDir = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CloudProvider = ""
+ } else {
+ x.CloudProvider = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CloudConfigFile = ""
+ } else {
+ x.CloudConfigFile = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeletCgroups = ""
+ } else {
+ x.KubeletCgroups = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RuntimeCgroups = ""
+ } else {
+ x.RuntimeCgroups = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SystemCgroups = ""
+ } else {
+ x.SystemCgroups = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CgroupRoot = ""
+ } else {
+ x.CgroupRoot = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContainerRuntime = ""
+ } else {
+ x.ContainerRuntime = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RuntimeRequestTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv171 := &x.RuntimeRequestTimeout
+ yym172 := z.DecBinary()
+ _ = yym172
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv171) {
+ } else if !yym172 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv171)
+ } else {
+ z.DecFallback(yyv171, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RktPath = ""
+ } else {
+ x.RktPath = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RktAPIEndpoint = ""
+ } else {
+ x.RktAPIEndpoint = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RktStage1Image = ""
+ } else {
+ x.RktStage1Image = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LockFilePath = ""
+ } else {
+ x.LockFilePath = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExitOnLockContention = false
+ } else {
+ x.ExitOnLockContention = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConfigureCBR0 = false
+ } else {
+ x.ConfigureCBR0 = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HairpinMode = ""
+ } else {
+ x.HairpinMode = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.BabysitDaemons = false
+ } else {
+ x.BabysitDaemons = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxPods = 0
+ } else {
+ x.MaxPods = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NvidiaGPUs = 0
+ } else {
+ x.NvidiaGPUs = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DockerExecHandlerName = ""
+ } else {
+ x.DockerExecHandlerName = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodCIDR = ""
+ } else {
+ x.PodCIDR = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResolverConfig = ""
+ } else {
+ x.ResolverConfig = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CPUCFSQuota = false
+ } else {
+ x.CPUCFSQuota = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Containerized = false
+ } else {
+ x.Containerized = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxOpenFiles = 0
+ } else {
+ x.MaxOpenFiles = uint64(r.DecodeUint(64))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReconcileCIDR = false
+ } else {
+ x.ReconcileCIDR = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RegisterSchedulable = false
+ } else {
+ x.RegisterSchedulable = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContentType = ""
+ } else {
+ x.ContentType = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeAPIQPS = 0
+ } else {
+ x.KubeAPIQPS = float32(r.DecodeFloat(true))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeAPIBurst = 0
+ } else {
+ x.KubeAPIBurst = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SerializeImagePulls = false
+ } else {
+ x.SerializeImagePulls = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExperimentalFlannelOverlay = false
+ } else {
+ x.ExperimentalFlannelOverlay = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{}
+ } else {
+ yyv196 := &x.OutOfDiskTransitionFrequency
+ yym197 := z.DecBinary()
+ _ = yym197
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv196) {
+ } else if !yym197 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv196)
+ } else {
+ z.DecFallback(yyv196, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeIP = ""
+ } else {
+ x.NodeIP = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeLabels = nil
+ } else {
+ yyv199 := &x.NodeLabels
+ yym200 := z.DecBinary()
+ _ = yym200
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv199, false, d)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NonMasqueradeCIDR = ""
+ } else {
+ x.NonMasqueradeCIDR = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableCustomMetrics = false
+ } else {
+ x.EnableCustomMetrics = bool(r.DecodeBool())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EvictionHard = ""
+ } else {
+ x.EvictionHard = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EvictionSoft = ""
+ } else {
+ x.EvictionSoft = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EvictionSoftGracePeriod = ""
+ } else {
+ x.EvictionSoftGracePeriod = string(r.DecodeString())
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv206 := &x.EvictionPressureTransitionPeriod
+ yym207 := z.DecBinary()
+ _ = yym207
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv206) {
+ } else if !yym207 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv206)
+ } else {
+ z.DecFallback(yyv206, false)
+ }
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EvictionMaxPodGracePeriod = 0
+ } else {
+ x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodsPerCore = 0
+ } else {
+ x.PodsPerCore = int32(r.DecodeInt(32))
+ }
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableControllerAttachDetach = false
+ } else {
+ x.EnableControllerAttachDetach = bool(r.DecodeBool())
+ }
+ for {
+ yyj107++
+ if yyhl107 {
+ yyb107 = yyj107 > l
+ } else {
+ yyb107 = r.CheckBreak()
+ }
+ if yyb107 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj107-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[12] = x.Kind != ""
+ yyq2[13] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 12
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("address"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("algorithmProvider"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("policyConfigFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableProfiling))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableProfiling"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableProfiling))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContentType))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("contentType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContentType))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.KubeAPIQPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.KubeAPIQPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeInt(int64(x.KubeAPIBurst))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeInt(int64(x.KubeAPIBurst))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("schedulerName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failureDomains"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy37 := &x.LeaderElection
+ yy37.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("leaderElection"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy39 := &x.LeaderElection
+ yy39.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[12] {
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[12] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ yym45 := z.EncBinary()
+ _ = yym45
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym46 := z.EncBinary()
+ _ = yym46
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *KubeSchedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ case "address":
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ case "algorithmProvider":
+ if r.TryDecodeAsNil() {
+ x.AlgorithmProvider = ""
+ } else {
+ x.AlgorithmProvider = string(r.DecodeString())
+ }
+ case "policyConfigFile":
+ if r.TryDecodeAsNil() {
+ x.PolicyConfigFile = ""
+ } else {
+ x.PolicyConfigFile = string(r.DecodeString())
+ }
+ case "enableProfiling":
+ if r.TryDecodeAsNil() {
+ x.EnableProfiling = false
+ } else {
+ x.EnableProfiling = bool(r.DecodeBool())
+ }
+ case "contentType":
+ if r.TryDecodeAsNil() {
+ x.ContentType = ""
+ } else {
+ x.ContentType = string(r.DecodeString())
+ }
+ case "kubeAPIQPS":
+ if r.TryDecodeAsNil() {
+ x.KubeAPIQPS = 0
+ } else {
+ x.KubeAPIQPS = float32(r.DecodeFloat(true))
+ }
+ case "kubeAPIBurst":
+ if r.TryDecodeAsNil() {
+ x.KubeAPIBurst = 0
+ } else {
+ x.KubeAPIBurst = int32(r.DecodeInt(32))
+ }
+ case "schedulerName":
+ if r.TryDecodeAsNil() {
+ x.SchedulerName = ""
+ } else {
+ x.SchedulerName = string(r.DecodeString())
+ }
+ case "hardPodAffinitySymmetricWeight":
+ if r.TryDecodeAsNil() {
+ x.HardPodAffinitySymmetricWeight = 0
+ } else {
+ x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ case "failureDomains":
+ if r.TryDecodeAsNil() {
+ x.FailureDomains = ""
+ } else {
+ x.FailureDomains = string(r.DecodeString())
+ }
+ case "leaderElection":
+ if r.TryDecodeAsNil() {
+ x.LeaderElection = LeaderElectionConfiguration{}
+ } else {
+ yyv15 := &x.LeaderElection
+ yyv15.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj18 int
+ var yyb18 bool
+ var yyhl18 bool = l >= 0
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AlgorithmProvider = ""
+ } else {
+ x.AlgorithmProvider = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PolicyConfigFile = ""
+ } else {
+ x.PolicyConfigFile = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableProfiling = false
+ } else {
+ x.EnableProfiling = bool(r.DecodeBool())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContentType = ""
+ } else {
+ x.ContentType = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeAPIQPS = 0
+ } else {
+ x.KubeAPIQPS = float32(r.DecodeFloat(true))
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeAPIBurst = 0
+ } else {
+ x.KubeAPIBurst = int32(r.DecodeInt(32))
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SchedulerName = ""
+ } else {
+ x.SchedulerName = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HardPodAffinitySymmetricWeight = 0
+ } else {
+ x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FailureDomains = ""
+ } else {
+ x.FailureDomains = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LeaderElection = LeaderElectionConfiguration{}
+ } else {
+ yyv30 := &x.LeaderElection
+ yyv30.CodecDecodeSelf(d)
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj18++
+ if yyhl18 {
+ yyb18 = yyj18 > l
+ } else {
+ yyb18 = r.CheckBreak()
+ }
+ if yyb18 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj18-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *LeaderElectionConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.LeaderElect))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("leaderElect"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.LeaderElect))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.LeaseDuration
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("leaseDuration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.LeaseDuration
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy12 := &x.RenewDeadline
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("renewDeadline"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.RenewDeadline
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy14) {
+ } else if !yym15 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy14)
+ } else {
+ z.EncFallback(yy14)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy17 := &x.RetryPeriod
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("retryPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy19 := &x.RetryPeriod
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy19) {
+ } else if !yym20 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy19)
+ } else {
+ z.EncFallback(yy19)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LeaderElectionConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LeaderElectionConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "leaderElect":
+ if r.TryDecodeAsNil() {
+ x.LeaderElect = false
+ } else {
+ x.LeaderElect = bool(r.DecodeBool())
+ }
+ case "leaseDuration":
+ if r.TryDecodeAsNil() {
+ x.LeaseDuration = pkg1_unversioned.Duration{}
+ } else {
+ yyv5 := &x.LeaseDuration
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ case "renewDeadline":
+ if r.TryDecodeAsNil() {
+ x.RenewDeadline = pkg1_unversioned.Duration{}
+ } else {
+ yyv7 := &x.RenewDeadline
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv7)
+ } else {
+ z.DecFallback(yyv7, false)
+ }
+ }
+ case "retryPeriod":
+ if r.TryDecodeAsNil() {
+ x.RetryPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv9 := &x.RetryPeriod
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LeaderElect = false
+ } else {
+ x.LeaderElect = bool(r.DecodeBool())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LeaseDuration = pkg1_unversioned.Duration{}
+ } else {
+ yyv13 := &x.LeaseDuration
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv13) {
+ } else if !yym14 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv13)
+ } else {
+ z.DecFallback(yyv13, false)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RenewDeadline = pkg1_unversioned.Duration{}
+ } else {
+ yyv15 := &x.RenewDeadline
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RetryPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv17 := &x.RetryPeriod
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [50]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[48] = x.Kind != ""
+ yyq2[49] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(50)
+ } else {
+ yynn2 = 48
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Port))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("address"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Address))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cloudProvider"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentEndpointSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentEndpointSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentEndpointSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentRSSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentRSSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentRSSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentRCSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentRCSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentRCSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentResourceQuotaSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentDeploymentSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentDeploymentSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentDeploymentSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentDaemonSetSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentJobSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentJobSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym35 := z.EncBinary()
+ _ = yym35
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentJobSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym37 := z.EncBinary()
+ _ = yym37
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentNamespaceSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentNamespaceSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentNamespaceSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym40 := z.EncBinary()
+ _ = yym40
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentSATokenSyncs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("concurrentSATokenSyncs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ConcurrentSATokenSyncs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LookupCacheSizeForRC))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRC"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LookupCacheSizeForRC))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym46 := z.EncBinary()
+ _ = yym46
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LookupCacheSizeForRS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LookupCacheSizeForRS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym49 := z.EncBinary()
+ _ = yym49
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForDaemonSet"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym50 := z.EncBinary()
+ _ = yym50
+ if false {
+ } else {
+ r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy52 := &x.ServiceSyncPeriod
+ yym53 := z.EncBinary()
+ _ = yym53
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy52) {
+ } else if !yym53 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy52)
+ } else {
+ z.EncFallback(yy52)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy54 := &x.ServiceSyncPeriod
+ yym55 := z.EncBinary()
+ _ = yym55
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy54) {
+ } else if !yym55 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy54)
+ } else {
+ z.EncFallback(yy54)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy57 := &x.NodeSyncPeriod
+ yym58 := z.EncBinary()
+ _ = yym58
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy57) {
+ } else if !yym58 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy57)
+ } else {
+ z.EncFallback(yy57)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy59 := &x.NodeSyncPeriod
+ yym60 := z.EncBinary()
+ _ = yym60
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy59) {
+ } else if !yym60 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy59)
+ } else {
+ z.EncFallback(yy59)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy62 := &x.ResourceQuotaSyncPeriod
+ yym63 := z.EncBinary()
+ _ = yym63
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy62) {
+ } else if !yym63 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy62)
+ } else {
+ z.EncFallback(yy62)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceQuotaSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy64 := &x.ResourceQuotaSyncPeriod
+ yym65 := z.EncBinary()
+ _ = yym65
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy64) {
+ } else if !yym65 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy64)
+ } else {
+ z.EncFallback(yy64)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy67 := &x.NamespaceSyncPeriod
+ yym68 := z.EncBinary()
+ _ = yym68
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy67) {
+ } else if !yym68 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy67)
+ } else {
+ z.EncFallback(yy67)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespaceSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy69 := &x.NamespaceSyncPeriod
+ yym70 := z.EncBinary()
+ _ = yym70
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy69) {
+ } else if !yym70 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy69)
+ } else {
+ z.EncFallback(yy69)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy72 := &x.PVClaimBinderSyncPeriod
+ yym73 := z.EncBinary()
+ _ = yym73
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy72) {
+ } else if !yym73 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy72)
+ } else {
+ z.EncFallback(yy72)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("pvClaimBinderSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy74 := &x.PVClaimBinderSyncPeriod
+ yym75 := z.EncBinary()
+ _ = yym75
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy74) {
+ } else if !yym75 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy74)
+ } else {
+ z.EncFallback(yy74)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy77 := &x.MinResyncPeriod
+ yym78 := z.EncBinary()
+ _ = yym78
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy77) {
+ } else if !yym78 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy77)
+ } else {
+ z.EncFallback(yy77)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minResyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy79 := &x.MinResyncPeriod
+ yym80 := z.EncBinary()
+ _ = yym80
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy79) {
+ } else if !yym80 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy79)
+ } else {
+ z.EncFallback(yy79)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym82 := z.EncBinary()
+ _ = yym82
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TerminatedPodGCThreshold))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("terminatedPodGCThreshold"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym83 := z.EncBinary()
+ _ = yym83
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TerminatedPodGCThreshold))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy85 := &x.HorizontalPodAutoscalerSyncPeriod
+ yym86 := z.EncBinary()
+ _ = yym86
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy85) {
+ } else if !yym86 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy85)
+ } else {
+ z.EncFallback(yy85)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("horizontalPodAutoscalerSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy87 := &x.HorizontalPodAutoscalerSyncPeriod
+ yym88 := z.EncBinary()
+ _ = yym88
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy87) {
+ } else if !yym88 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy87)
+ } else {
+ z.EncFallback(yy87)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy90 := &x.DeploymentControllerSyncPeriod
+ yym91 := z.EncBinary()
+ _ = yym91
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy90) {
+ } else if !yym91 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy90)
+ } else {
+ z.EncFallback(yy90)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deploymentControllerSyncPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy92 := &x.DeploymentControllerSyncPeriod
+ yym93 := z.EncBinary()
+ _ = yym93
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy92) {
+ } else if !yym93 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy92)
+ } else {
+ z.EncFallback(yy92)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy95 := &x.PodEvictionTimeout
+ yym96 := z.EncBinary()
+ _ = yym96
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy95) {
+ } else if !yym96 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy95)
+ } else {
+ z.EncFallback(yy95)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podEvictionTimeout"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy97 := &x.PodEvictionTimeout
+ yym98 := z.EncBinary()
+ _ = yym98
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy97) {
+ } else if !yym98 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy97)
+ } else {
+ z.EncFallback(yy97)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym100 := z.EncBinary()
+ _ = yym100
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.DeletingPodsQps))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deletingPodsQps"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym101 := z.EncBinary()
+ _ = yym101
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.DeletingPodsQps))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym103 := z.EncBinary()
+ _ = yym103
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DeletingPodsBurst))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("deletingPodsBurst"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym104 := z.EncBinary()
+ _ = yym104
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DeletingPodsBurst))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy106 := &x.NodeMonitorGracePeriod
+ yym107 := z.EncBinary()
+ _ = yym107
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy106) {
+ } else if !yym107 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy106)
+ } else {
+ z.EncFallback(yy106)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorGracePeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy108 := &x.NodeMonitorGracePeriod
+ yym109 := z.EncBinary()
+ _ = yym109
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy108) {
+ } else if !yym109 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy108)
+ } else {
+ z.EncFallback(yy108)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym111 := z.EncBinary()
+ _ = yym111
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RegisterRetryCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("registerRetryCount"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym112 := z.EncBinary()
+ _ = yym112
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RegisterRetryCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy114 := &x.NodeStartupGracePeriod
+ yym115 := z.EncBinary()
+ _ = yym115
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy114) {
+ } else if !yym115 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy114)
+ } else {
+ z.EncFallback(yy114)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeStartupGracePeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy116 := &x.NodeStartupGracePeriod
+ yym117 := z.EncBinary()
+ _ = yym117
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy116) {
+ } else if !yym117 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy116)
+ } else {
+ z.EncFallback(yy116)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy119 := &x.NodeMonitorPeriod
+ yym120 := z.EncBinary()
+ _ = yym120
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy119) {
+ } else if !yym120 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy119)
+ } else {
+ z.EncFallback(yy119)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorPeriod"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy121 := &x.NodeMonitorPeriod
+ yym122 := z.EncBinary()
+ _ = yym122
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy121) {
+ } else if !yym122 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy121)
+ } else {
+ z.EncFallback(yy121)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym124 := z.EncBinary()
+ _ = yym124
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceAccountKeyFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym125 := z.EncBinary()
+ _ = yym125
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym127 := z.EncBinary()
+ _ = yym127
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableProfiling))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableProfiling"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym128 := z.EncBinary()
+ _ = yym128
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableProfiling))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym130 := z.EncBinary()
+ _ = yym130
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym131 := z.EncBinary()
+ _ = yym131
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym133 := z.EncBinary()
+ _ = yym133
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym134 := z.EncBinary()
+ _ = yym134
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym136 := z.EncBinary()
+ _ = yym136
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceCIDR"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym137 := z.EncBinary()
+ _ = yym137
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym139 := z.EncBinary()
+ _ = yym139
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NodeCIDRMaskSize))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nodeCIDRMaskSize"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym140 := z.EncBinary()
+ _ = yym140
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NodeCIDRMaskSize))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym142 := z.EncBinary()
+ _ = yym142
+ if false {
+ } else {
+ r.EncodeBool(bool(x.AllocateNodeCIDRs))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allocateNodeCIDRs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym143 := z.EncBinary()
+ _ = yym143
+ if false {
+ } else {
+ r.EncodeBool(bool(x.AllocateNodeCIDRs))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym145 := z.EncBinary()
+ _ = yym145
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ConfigureCloudRoutes))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("configureCloudRoutes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym146 := z.EncBinary()
+ _ = yym146
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ConfigureCloudRoutes))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym148 := z.EncBinary()
+ _ = yym148
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rootCAFile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym149 := z.EncBinary()
+ _ = yym149
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym151 := z.EncBinary()
+ _ = yym151
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContentType))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("contentType"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym152 := z.EncBinary()
+ _ = yym152
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ContentType))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym154 := z.EncBinary()
+ _ = yym154
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.KubeAPIQPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym155 := z.EncBinary()
+ _ = yym155
+ if false {
+ } else {
+ r.EncodeFloat32(float32(x.KubeAPIQPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym157 := z.EncBinary()
+ _ = yym157
+ if false {
+ } else {
+ r.EncodeInt(int64(x.KubeAPIBurst))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym158 := z.EncBinary()
+ _ = yym158
+ if false {
+ } else {
+ r.EncodeInt(int64(x.KubeAPIBurst))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy160 := &x.LeaderElection
+ yy160.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("leaderElection"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy162 := &x.LeaderElection
+ yy162.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy165 := &x.VolumeConfiguration
+ yy165.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumeConfiguration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy167 := &x.VolumeConfiguration
+ yy167.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy170 := &x.ControllerStartInterval
+ yym171 := z.EncBinary()
+ _ = yym171
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy170) {
+ } else if !yym171 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy170)
+ } else {
+ z.EncFallback(yy170)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("controllerStartInterval"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy172 := &x.ControllerStartInterval
+ yym173 := z.EncBinary()
+ _ = yym173
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy172) {
+ } else if !yym173 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy172)
+ } else {
+ z.EncFallback(yy172)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym175 := z.EncBinary()
+ _ = yym175
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableGarbageCollector))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableGarbageCollector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym176 := z.EncBinary()
+ _ = yym176
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableGarbageCollector))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[48] {
+ yym178 := z.EncBinary()
+ _ = yym178
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[48] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym179 := z.EncBinary()
+ _ = yym179
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[49] {
+ yym181 := z.EncBinary()
+ _ = yym181
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[49] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym182 := z.EncBinary()
+ _ = yym182
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *KubeControllerManagerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "port":
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ case "address":
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ case "cloudProvider":
+ if r.TryDecodeAsNil() {
+ x.CloudProvider = ""
+ } else {
+ x.CloudProvider = string(r.DecodeString())
+ }
+ case "cloudConfigFile":
+ if r.TryDecodeAsNil() {
+ x.CloudConfigFile = ""
+ } else {
+ x.CloudConfigFile = string(r.DecodeString())
+ }
+ case "concurrentEndpointSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentEndpointSyncs = 0
+ } else {
+ x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentRSSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentRSSyncs = 0
+ } else {
+ x.ConcurrentRSSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentRCSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentRCSyncs = 0
+ } else {
+ x.ConcurrentRCSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentResourceQuotaSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentResourceQuotaSyncs = 0
+ } else {
+ x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentDeploymentSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentDeploymentSyncs = 0
+ } else {
+ x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentDaemonSetSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentDaemonSetSyncs = 0
+ } else {
+ x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentJobSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentJobSyncs = 0
+ } else {
+ x.ConcurrentJobSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentNamespaceSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentNamespaceSyncs = 0
+ } else {
+ x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32))
+ }
+ case "concurrentSATokenSyncs":
+ if r.TryDecodeAsNil() {
+ x.ConcurrentSATokenSyncs = 0
+ } else {
+ x.ConcurrentSATokenSyncs = int32(r.DecodeInt(32))
+ }
+ case "lookupCacheSizeForRC":
+ if r.TryDecodeAsNil() {
+ x.LookupCacheSizeForRC = 0
+ } else {
+ x.LookupCacheSizeForRC = int32(r.DecodeInt(32))
+ }
+ case "lookupCacheSizeForRS":
+ if r.TryDecodeAsNil() {
+ x.LookupCacheSizeForRS = 0
+ } else {
+ x.LookupCacheSizeForRS = int32(r.DecodeInt(32))
+ }
+ case "lookupCacheSizeForDaemonSet":
+ if r.TryDecodeAsNil() {
+ x.LookupCacheSizeForDaemonSet = 0
+ } else {
+ x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32))
+ }
+ case "serviceSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.ServiceSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv20 := &x.ServiceSyncPeriod
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv20) {
+ } else if !yym21 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv20)
+ } else {
+ z.DecFallback(yyv20, false)
+ }
+ }
+ case "nodeSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.NodeSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv22 := &x.NodeSyncPeriod
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv22) {
+ } else if !yym23 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv22)
+ } else {
+ z.DecFallback(yyv22, false)
+ }
+ }
+ case "resourceQuotaSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv24 := &x.ResourceQuotaSyncPeriod
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv24) {
+ } else if !yym25 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv24)
+ } else {
+ z.DecFallback(yyv24, false)
+ }
+ }
+ case "namespaceSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.NamespaceSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv26 := &x.NamespaceSyncPeriod
+ yym27 := z.DecBinary()
+ _ = yym27
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv26) {
+ } else if !yym27 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv26)
+ } else {
+ z.DecFallback(yyv26, false)
+ }
+ }
+ case "pvClaimBinderSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv28 := &x.PVClaimBinderSyncPeriod
+ yym29 := z.DecBinary()
+ _ = yym29
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv28) {
+ } else if !yym29 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv28)
+ } else {
+ z.DecFallback(yyv28, false)
+ }
+ }
+ case "minResyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.MinResyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv30 := &x.MinResyncPeriod
+ yym31 := z.DecBinary()
+ _ = yym31
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv30) {
+ } else if !yym31 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv30)
+ } else {
+ z.DecFallback(yyv30, false)
+ }
+ }
+ case "terminatedPodGCThreshold":
+ if r.TryDecodeAsNil() {
+ x.TerminatedPodGCThreshold = 0
+ } else {
+ x.TerminatedPodGCThreshold = int32(r.DecodeInt(32))
+ }
+ case "horizontalPodAutoscalerSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv33 := &x.HorizontalPodAutoscalerSyncPeriod
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv33) {
+ } else if !yym34 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv33)
+ } else {
+ z.DecFallback(yyv33, false)
+ }
+ }
+ case "deploymentControllerSyncPeriod":
+ if r.TryDecodeAsNil() {
+ x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv35 := &x.DeploymentControllerSyncPeriod
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv35) {
+ } else if !yym36 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv35)
+ } else {
+ z.DecFallback(yyv35, false)
+ }
+ }
+ case "podEvictionTimeout":
+ if r.TryDecodeAsNil() {
+ x.PodEvictionTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv37 := &x.PodEvictionTimeout
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv37) {
+ } else if !yym38 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv37)
+ } else {
+ z.DecFallback(yyv37, false)
+ }
+ }
+ case "deletingPodsQps":
+ if r.TryDecodeAsNil() {
+ x.DeletingPodsQps = 0
+ } else {
+ x.DeletingPodsQps = float32(r.DecodeFloat(true))
+ }
+ case "deletingPodsBurst":
+ if r.TryDecodeAsNil() {
+ x.DeletingPodsBurst = 0
+ } else {
+ x.DeletingPodsBurst = int32(r.DecodeInt(32))
+ }
+ case "nodeMonitorGracePeriod":
+ if r.TryDecodeAsNil() {
+ x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv41 := &x.NodeMonitorGracePeriod
+ yym42 := z.DecBinary()
+ _ = yym42
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv41) {
+ } else if !yym42 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv41)
+ } else {
+ z.DecFallback(yyv41, false)
+ }
+ }
+ case "registerRetryCount":
+ if r.TryDecodeAsNil() {
+ x.RegisterRetryCount = 0
+ } else {
+ x.RegisterRetryCount = int32(r.DecodeInt(32))
+ }
+ case "nodeStartupGracePeriod":
+ if r.TryDecodeAsNil() {
+ x.NodeStartupGracePeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv44 := &x.NodeStartupGracePeriod
+ yym45 := z.DecBinary()
+ _ = yym45
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv44) {
+ } else if !yym45 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv44)
+ } else {
+ z.DecFallback(yyv44, false)
+ }
+ }
+ case "nodeMonitorPeriod":
+ if r.TryDecodeAsNil() {
+ x.NodeMonitorPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv46 := &x.NodeMonitorPeriod
+ yym47 := z.DecBinary()
+ _ = yym47
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv46) {
+ } else if !yym47 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv46)
+ } else {
+ z.DecFallback(yyv46, false)
+ }
+ }
+ case "serviceAccountKeyFile":
+ if r.TryDecodeAsNil() {
+ x.ServiceAccountKeyFile = ""
+ } else {
+ x.ServiceAccountKeyFile = string(r.DecodeString())
+ }
+ case "enableProfiling":
+ if r.TryDecodeAsNil() {
+ x.EnableProfiling = false
+ } else {
+ x.EnableProfiling = bool(r.DecodeBool())
+ }
+ case "clusterName":
+ if r.TryDecodeAsNil() {
+ x.ClusterName = ""
+ } else {
+ x.ClusterName = string(r.DecodeString())
+ }
+ case "clusterCIDR":
+ if r.TryDecodeAsNil() {
+ x.ClusterCIDR = ""
+ } else {
+ x.ClusterCIDR = string(r.DecodeString())
+ }
+ case "serviceCIDR":
+ if r.TryDecodeAsNil() {
+ x.ServiceCIDR = ""
+ } else {
+ x.ServiceCIDR = string(r.DecodeString())
+ }
+ case "nodeCIDRMaskSize":
+ if r.TryDecodeAsNil() {
+ x.NodeCIDRMaskSize = 0
+ } else {
+ x.NodeCIDRMaskSize = int32(r.DecodeInt(32))
+ }
+ case "allocateNodeCIDRs":
+ if r.TryDecodeAsNil() {
+ x.AllocateNodeCIDRs = false
+ } else {
+ x.AllocateNodeCIDRs = bool(r.DecodeBool())
+ }
+ case "configureCloudRoutes":
+ if r.TryDecodeAsNil() {
+ x.ConfigureCloudRoutes = false
+ } else {
+ x.ConfigureCloudRoutes = bool(r.DecodeBool())
+ }
+ case "rootCAFile":
+ if r.TryDecodeAsNil() {
+ x.RootCAFile = ""
+ } else {
+ x.RootCAFile = string(r.DecodeString())
+ }
+ case "contentType":
+ if r.TryDecodeAsNil() {
+ x.ContentType = ""
+ } else {
+ x.ContentType = string(r.DecodeString())
+ }
+ case "kubeAPIQPS":
+ if r.TryDecodeAsNil() {
+ x.KubeAPIQPS = 0
+ } else {
+ x.KubeAPIQPS = float32(r.DecodeFloat(true))
+ }
+ case "kubeAPIBurst":
+ if r.TryDecodeAsNil() {
+ x.KubeAPIBurst = 0
+ } else {
+ x.KubeAPIBurst = int32(r.DecodeInt(32))
+ }
+ case "leaderElection":
+ if r.TryDecodeAsNil() {
+ x.LeaderElection = LeaderElectionConfiguration{}
+ } else {
+ yyv60 := &x.LeaderElection
+ yyv60.CodecDecodeSelf(d)
+ }
+ case "volumeConfiguration":
+ if r.TryDecodeAsNil() {
+ x.VolumeConfiguration = VolumeConfiguration{}
+ } else {
+ yyv61 := &x.VolumeConfiguration
+ yyv61.CodecDecodeSelf(d)
+ }
+ case "controllerStartInterval":
+ if r.TryDecodeAsNil() {
+ x.ControllerStartInterval = pkg1_unversioned.Duration{}
+ } else {
+ yyv62 := &x.ControllerStartInterval
+ yym63 := z.DecBinary()
+ _ = yym63
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv62) {
+ } else if !yym63 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv62)
+ } else {
+ z.DecFallback(yyv62, false)
+ }
+ }
+ case "enableGarbageCollector":
+ if r.TryDecodeAsNil() {
+ x.EnableGarbageCollector = false
+ } else {
+ x.EnableGarbageCollector = bool(r.DecodeBool())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj67 int
+ var yyb67 bool
+ var yyhl67 bool = l >= 0
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Port = 0
+ } else {
+ x.Port = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Address = ""
+ } else {
+ x.Address = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CloudProvider = ""
+ } else {
+ x.CloudProvider = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CloudConfigFile = ""
+ } else {
+ x.CloudConfigFile = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentEndpointSyncs = 0
+ } else {
+ x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentRSSyncs = 0
+ } else {
+ x.ConcurrentRSSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentRCSyncs = 0
+ } else {
+ x.ConcurrentRCSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentResourceQuotaSyncs = 0
+ } else {
+ x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentDeploymentSyncs = 0
+ } else {
+ x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentDaemonSetSyncs = 0
+ } else {
+ x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentJobSyncs = 0
+ } else {
+ x.ConcurrentJobSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentNamespaceSyncs = 0
+ } else {
+ x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConcurrentSATokenSyncs = 0
+ } else {
+ x.ConcurrentSATokenSyncs = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LookupCacheSizeForRC = 0
+ } else {
+ x.LookupCacheSizeForRC = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LookupCacheSizeForRS = 0
+ } else {
+ x.LookupCacheSizeForRS = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LookupCacheSizeForDaemonSet = 0
+ } else {
+ x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv84 := &x.ServiceSyncPeriod
+ yym85 := z.DecBinary()
+ _ = yym85
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv84) {
+ } else if !yym85 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv84)
+ } else {
+ z.DecFallback(yyv84, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv86 := &x.NodeSyncPeriod
+ yym87 := z.DecBinary()
+ _ = yym87
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv86) {
+ } else if !yym87 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv86)
+ } else {
+ z.DecFallback(yyv86, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv88 := &x.ResourceQuotaSyncPeriod
+ yym89 := z.DecBinary()
+ _ = yym89
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv88) {
+ } else if !yym89 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv88)
+ } else {
+ z.DecFallback(yyv88, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NamespaceSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv90 := &x.NamespaceSyncPeriod
+ yym91 := z.DecBinary()
+ _ = yym91
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv90) {
+ } else if !yym91 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv90)
+ } else {
+ z.DecFallback(yyv90, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv92 := &x.PVClaimBinderSyncPeriod
+ yym93 := z.DecBinary()
+ _ = yym93
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv92) {
+ } else if !yym93 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv92)
+ } else {
+ z.DecFallback(yyv92, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinResyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv94 := &x.MinResyncPeriod
+ yym95 := z.DecBinary()
+ _ = yym95
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv94) {
+ } else if !yym95 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv94)
+ } else {
+ z.DecFallback(yyv94, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TerminatedPodGCThreshold = 0
+ } else {
+ x.TerminatedPodGCThreshold = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv97 := &x.HorizontalPodAutoscalerSyncPeriod
+ yym98 := z.DecBinary()
+ _ = yym98
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv97) {
+ } else if !yym98 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv97)
+ } else {
+ z.DecFallback(yyv97, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv99 := &x.DeploymentControllerSyncPeriod
+ yym100 := z.DecBinary()
+ _ = yym100
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv99) {
+ } else if !yym100 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv99)
+ } else {
+ z.DecFallback(yyv99, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodEvictionTimeout = pkg1_unversioned.Duration{}
+ } else {
+ yyv101 := &x.PodEvictionTimeout
+ yym102 := z.DecBinary()
+ _ = yym102
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv101) {
+ } else if !yym102 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv101)
+ } else {
+ z.DecFallback(yyv101, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DeletingPodsQps = 0
+ } else {
+ x.DeletingPodsQps = float32(r.DecodeFloat(true))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DeletingPodsBurst = 0
+ } else {
+ x.DeletingPodsBurst = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv105 := &x.NodeMonitorGracePeriod
+ yym106 := z.DecBinary()
+ _ = yym106
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv105) {
+ } else if !yym106 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv105)
+ } else {
+ z.DecFallback(yyv105, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RegisterRetryCount = 0
+ } else {
+ x.RegisterRetryCount = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeStartupGracePeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv108 := &x.NodeStartupGracePeriod
+ yym109 := z.DecBinary()
+ _ = yym109
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv108) {
+ } else if !yym109 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv108)
+ } else {
+ z.DecFallback(yyv108, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeMonitorPeriod = pkg1_unversioned.Duration{}
+ } else {
+ yyv110 := &x.NodeMonitorPeriod
+ yym111 := z.DecBinary()
+ _ = yym111
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv110) {
+ } else if !yym111 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv110)
+ } else {
+ z.DecFallback(yyv110, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceAccountKeyFile = ""
+ } else {
+ x.ServiceAccountKeyFile = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableProfiling = false
+ } else {
+ x.EnableProfiling = bool(r.DecodeBool())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterName = ""
+ } else {
+ x.ClusterName = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ClusterCIDR = ""
+ } else {
+ x.ClusterCIDR = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceCIDR = ""
+ } else {
+ x.ServiceCIDR = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NodeCIDRMaskSize = 0
+ } else {
+ x.NodeCIDRMaskSize = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AllocateNodeCIDRs = false
+ } else {
+ x.AllocateNodeCIDRs = bool(r.DecodeBool())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ConfigureCloudRoutes = false
+ } else {
+ x.ConfigureCloudRoutes = bool(r.DecodeBool())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RootCAFile = ""
+ } else {
+ x.RootCAFile = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ContentType = ""
+ } else {
+ x.ContentType = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeAPIQPS = 0
+ } else {
+ x.KubeAPIQPS = float32(r.DecodeFloat(true))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.KubeAPIBurst = 0
+ } else {
+ x.KubeAPIBurst = int32(r.DecodeInt(32))
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LeaderElection = LeaderElectionConfiguration{}
+ } else {
+ yyv124 := &x.LeaderElection
+ yyv124.CodecDecodeSelf(d)
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.VolumeConfiguration = VolumeConfiguration{}
+ } else {
+ yyv125 := &x.VolumeConfiguration
+ yyv125.CodecDecodeSelf(d)
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ControllerStartInterval = pkg1_unversioned.Duration{}
+ } else {
+ yyv126 := &x.ControllerStartInterval
+ yym127 := z.DecBinary()
+ _ = yym127
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv126) {
+ } else if !yym127 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv126)
+ } else {
+ z.DecFallback(yyv126, false)
+ }
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableGarbageCollector = false
+ } else {
+ x.EnableGarbageCollector = bool(r.DecodeBool())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj67++
+ if yyhl67 {
+ yyb67 = yyj67 > l
+ } else {
+ yyb67 = r.CheckBreak()
+ }
+ if yyb67 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj67-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableHostPathProvisioning))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableHostPathProvisioning"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableHostPathProvisioning))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableDynamicProvisioning))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("enableDynamicProvisioning"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.EnableDynamicProvisioning))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy10 := &x.PersistentVolumeRecyclerConfiguration
+ yy10.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("persitentVolumeRecyclerConfiguration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.PersistentVolumeRecyclerConfiguration
+ yy12.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("flexVolumePluginDir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *VolumeConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "enableHostPathProvisioning":
+ if r.TryDecodeAsNil() {
+ x.EnableHostPathProvisioning = false
+ } else {
+ x.EnableHostPathProvisioning = bool(r.DecodeBool())
+ }
+ case "enableDynamicProvisioning":
+ if r.TryDecodeAsNil() {
+ x.EnableDynamicProvisioning = false
+ } else {
+ x.EnableDynamicProvisioning = bool(r.DecodeBool())
+ }
+ case "persitentVolumeRecyclerConfiguration":
+ if r.TryDecodeAsNil() {
+ x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{}
+ } else {
+ yyv6 := &x.PersistentVolumeRecyclerConfiguration
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "flexVolumePluginDir":
+ if r.TryDecodeAsNil() {
+ x.FlexVolumePluginDir = ""
+ } else {
+ x.FlexVolumePluginDir = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableHostPathProvisioning = false
+ } else {
+ x.EnableHostPathProvisioning = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.EnableDynamicProvisioning = false
+ } else {
+ x.EnableDynamicProvisioning = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{}
+ } else {
+ yyv11 := &x.PersistentVolumeRecyclerConfiguration
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FlexVolumePluginDir = ""
+ } else {
+ x.FlexVolumePluginDir = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 7
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaximumRetry))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maximumRetry"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaximumRetry))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinimumTimeoutNFS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutNFS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinimumTimeoutNFS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathNFS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.IncrementTimeoutNFS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutNFS"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.IncrementTimeoutNFS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathHostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinimumTimeoutHostPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutHostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinimumTimeoutHostPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeInt(int64(x.IncrementTimeoutHostPath))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutHostPath"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeInt(int64(x.IncrementTimeoutHostPath))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PersistentVolumeRecyclerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "maximumRetry":
+ if r.TryDecodeAsNil() {
+ x.MaximumRetry = 0
+ } else {
+ x.MaximumRetry = int32(r.DecodeInt(32))
+ }
+ case "minimumTimeoutNFS":
+ if r.TryDecodeAsNil() {
+ x.MinimumTimeoutNFS = 0
+ } else {
+ x.MinimumTimeoutNFS = int32(r.DecodeInt(32))
+ }
+ case "podTemplateFilePathNFS":
+ if r.TryDecodeAsNil() {
+ x.PodTemplateFilePathNFS = ""
+ } else {
+ x.PodTemplateFilePathNFS = string(r.DecodeString())
+ }
+ case "incrementTimeoutNFS":
+ if r.TryDecodeAsNil() {
+ x.IncrementTimeoutNFS = 0
+ } else {
+ x.IncrementTimeoutNFS = int32(r.DecodeInt(32))
+ }
+ case "podTemplateFilePathHostPath":
+ if r.TryDecodeAsNil() {
+ x.PodTemplateFilePathHostPath = ""
+ } else {
+ x.PodTemplateFilePathHostPath = string(r.DecodeString())
+ }
+ case "minimumTimeoutHostPath":
+ if r.TryDecodeAsNil() {
+ x.MinimumTimeoutHostPath = 0
+ } else {
+ x.MinimumTimeoutHostPath = int32(r.DecodeInt(32))
+ }
+ case "incrementTimeoutHostPath":
+ if r.TryDecodeAsNil() {
+ x.IncrementTimeoutHostPath = 0
+ } else {
+ x.IncrementTimeoutHostPath = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj11 int
+ var yyb11 bool
+ var yyhl11 bool = l >= 0
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaximumRetry = 0
+ } else {
+ x.MaximumRetry = int32(r.DecodeInt(32))
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinimumTimeoutNFS = 0
+ } else {
+ x.MinimumTimeoutNFS = int32(r.DecodeInt(32))
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodTemplateFilePathNFS = ""
+ } else {
+ x.PodTemplateFilePathNFS = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IncrementTimeoutNFS = 0
+ } else {
+ x.IncrementTimeoutNFS = int32(r.DecodeInt(32))
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodTemplateFilePathHostPath = ""
+ } else {
+ x.PodTemplateFilePathHostPath = string(r.DecodeString())
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinimumTimeoutHostPath = 0
+ } else {
+ x.MinimumTimeoutHostPath = int32(r.DecodeInt(32))
+ }
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.IncrementTimeoutHostPath = 0
+ } else {
+ x.IncrementTimeoutHostPath = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj11++
+ if yyhl11 {
+ yyb11 = yyj11 > l
+ } else {
+ yyb11 = r.CheckBreak()
+ }
+ if yyb11 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj11-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go
new file mode 100644
index 0000000..97e9233
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go
@@ -0,0 +1,621 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package componentconfig
+
+import "k8s.io/kubernetes/pkg/api/unversioned"
+
+type KubeProxyConfiguration struct {
+ unversioned.TypeMeta
+
+ // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
+ // for all interfaces)
+ BindAddress string `json:"bindAddress"`
+ // clusterCIDR is the CIDR range of the pods in the cluster. It is used to
+ // bridge traffic coming from outside of the cluster. If not provided,
+ // no off-cluster bridging will be performed.
+ ClusterCIDR string `json:"clusterCIDR"`
+ // healthzBindAddress is the IP address for the health check server to serve on,
+ // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
+ HealthzBindAddress string `json:"healthzBindAddress"`
+ // healthzPort is the port to bind the health check server. Use 0 to disable.
+ HealthzPort int32 `json:"healthzPort"`
+ // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
+ HostnameOverride string `json:"hostnameOverride"`
+ // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
+ // the pure iptables proxy mode. Values must be within the range [0, 31].
+ IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"`
+ // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
+ // '2h22m'). Must be greater than 0.
+ IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
+ // kubeconfigPath is the path to the kubeconfig file with authorization information (the
+ // master location is set by the master flag).
+ KubeconfigPath string `json:"kubeconfigPath"`
+ // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
+ MasqueradeAll bool `json:"masqueradeAll"`
+ // master is the address of the Kubernetes API server (overrides any value in kubeconfig)
+ Master string `json:"master"`
+ // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
+ // the range [-1000, 1000]
+ OOMScoreAdj *int32 `json:"oomScoreAdj"`
+ // mode specifies which proxy mode to use.
+ Mode ProxyMode `json:"mode"`
+ // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
+ // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
+ PortRange string `json:"portRange"`
+ // resourceContainer is the absolute name of the resource-only container to create and run
+ // the Kube-proxy in (Default: /kube-proxy).
+ ResourceContainer string `json:"kubeletCgroups"`
+ // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
+ // Must be greater than 0. Only applicable for proxyMode=userspace.
+ UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
+	// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is).
+ ConntrackMax int32 `json:"conntrackMax"`
+	// conntrackTCPEstablishedTimeout is how long an idle TCP connection in the ESTABLISHED
+	// state will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable when proxyMode is 'userspace'.
+ ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
+}
+
+// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
+// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the
+// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the
+// best-available proxy (currently iptables, but may change in future versions). If the
+// iptables proxy is selected, regardless of how it was chosen, but if the system's kernel or iptables
+// versions are insufficient, this always falls back to the userspace proxy.
+type ProxyMode string
+
+const (
+ ProxyModeUserspace ProxyMode = "userspace"
+ ProxyModeIPTables ProxyMode = "iptables"
+)
+
+// HairpinMode denotes how the kubelet should configure networking to handle
+// hairpin packets.
+type HairpinMode string
+
+// Enum settings for different ways to handle hairpin packets.
+const (
+ // Set the hairpin flag on the veth of containers in the respective
+ // container runtime.
+ HairpinVeth = "hairpin-veth"
+ // Make the container bridge promiscuous. This will force it to accept
+ // hairpin packets, even if the flag isn't set on ports of the bridge.
+ PromiscuousBridge = "promiscuous-bridge"
+ // Neither of the above. If the kubelet is started in this hairpin mode
+ // and kube-proxy is running in iptables mode, hairpin packets will be
+ // dropped by the container bridge.
+ HairpinNone = "none"
+)
+
+// TODO: curate the ordering and structure of this config object
+type KubeletConfiguration struct {
+ // config is the path to the config file or directory of files
+ Config string `json:"config"`
+ // syncFrequency is the max period between synchronizing running
+ // containers and config
+ SyncFrequency unversioned.Duration `json:"syncFrequency"`
+ // fileCheckFrequency is the duration between checking config files for
+ // new data
+ FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"`
+ // httpCheckFrequency is the duration between checking http for new data
+ HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"`
+ // manifestURL is the URL for accessing the container manifest
+ ManifestURL string `json:"manifestURL"`
+ // manifestURLHeader is the HTTP header to use when accessing the manifest
+ // URL, with the key separated from the value with a ':', as in 'key:value'
+ ManifestURLHeader string `json:"manifestURLHeader"`
+ // enableServer enables the Kubelet's server
+ EnableServer bool `json:"enableServer"`
+ // address is the IP address for the Kubelet to serve on (set to 0.0.0.0
+ // for all interfaces)
+ Address string `json:"address"`
+ // port is the port for the Kubelet to serve on.
+ Port uint `json:"port"`
+ // readOnlyPort is the read-only port for the Kubelet to serve on with
+ // no authentication/authorization (set to 0 to disable)
+ ReadOnlyPort uint `json:"readOnlyPort"`
+ // tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert,
+ // if any, concatenated after server cert). If tlsCertFile and
+ // tlsPrivateKeyFile are not provided, a self-signed certificate
+ // and key are generated for the public address and saved to the directory
+ // passed to certDir.
+ TLSCertFile string `json:"tLSCertFile"`
+	// tLSPrivateKeyFile is the file containing the x509 private key matching
+ // tlsCertFile.
+ TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"`
+ // certDirectory is the directory where the TLS certs are located (by
+ // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile
+ // are provided, this flag will be ignored.
+ CertDirectory string `json:"certDirectory"`
+ // hostnameOverride is the hostname used to identify the kubelet instead
+ // of the actual hostname.
+ HostnameOverride string `json:"hostnameOverride"`
+ // podInfraContainerImage is the image whose network/ipc namespaces
+ // containers in each pod will use.
+ PodInfraContainerImage string `json:"podInfraContainerImage"`
+ // dockerEndpoint is the path to the docker endpoint to communicate with.
+ DockerEndpoint string `json:"dockerEndpoint"`
+	// rootDirectory is the directory path in which to place kubelet files (volume
+	// mounts, etc.).
+ RootDirectory string `json:"rootDirectory"`
+ // seccompProfileRoot is the directory path for seccomp profiles.
+ SeccompProfileRoot string `json:"seccompProfileRoot"`
+ // allowPrivileged enables containers to request privileged mode.
+ // Defaults to false.
+ AllowPrivileged bool `json:"allowPrivileged"`
+ // hostNetworkSources is a comma-separated list of sources from which the
+	// Kubelet allows pods to use the host network. Defaults to "*".
+ HostNetworkSources string `json:"hostNetworkSources"`
+ // hostPIDSources is a comma-separated list of sources from which the
+ // Kubelet allows pods to use the host pid namespace. Defaults to "*".
+ HostPIDSources string `json:"hostPIDSources"`
+ // hostIPCSources is a comma-separated list of sources from which the
+ // Kubelet allows pods to use the host ipc namespace. Defaults to "*".
+ HostIPCSources string `json:"hostIPCSources"`
+	// registryPullQPS is the limit of registry pulls per second.
+	// Set to 0 for no limit. Defaults to 5.0.
+ RegistryPullQPS float64 `json:"registryPullQPS"`
+	// registryBurst is the maximum size of a burst of pulls, temporarily allowing
+	// pulls to burst to this number while still not exceeding registryPullQPS.
+	// Only used if registryPullQPS > 0.
+ RegistryBurst int32 `json:"registryBurst"`
+ // eventRecordQPS is the maximum event creations per second. If 0, there
+ // is no limit enforced.
+ EventRecordQPS float32 `json:"eventRecordQPS"`
+	// eventBurst is the maximum size of a burst of event records, temporarily
+	// allowing event records to burst to this number while still not exceeding
+	// eventRecordQPS. Only used if eventRecordQPS > 0.
+ EventBurst int32 `json:"eventBurst"`
+ // enableDebuggingHandlers enables server endpoints for log collection
+ // and local running of containers and commands
+ EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"`
+ // minimumGCAge is the minimum age for a finished container before it is
+ // garbage collected.
+ MinimumGCAge unversioned.Duration `json:"minimumGCAge"`
+ // maxPerPodContainerCount is the maximum number of old instances to
+ // retain per container. Each container takes up some disk space.
+ MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"`
+ // maxContainerCount is the maximum number of old instances of containers
+ // to retain globally. Each container takes up some disk space.
+ MaxContainerCount int32 `json:"maxContainerCount"`
+ // cAdvisorPort is the port of the localhost cAdvisor endpoint
+ CAdvisorPort uint `json:"cAdvisorPort"`
+ // healthzPort is the port of the localhost healthz endpoint
+ HealthzPort int32 `json:"healthzPort"`
+ // healthzBindAddress is the IP address for the healthz server to serve
+ // on.
+ HealthzBindAddress string `json:"healthzBindAddress"`
+	// oomScoreAdj is the oom-score-adj value for the kubelet process. Values
+ // must be within the range [-1000, 1000].
+ OOMScoreAdj int32 `json:"oomScoreAdj"`
+ // registerNode enables automatic registration with the apiserver.
+ RegisterNode bool `json:"registerNode"`
+ // clusterDomain is the DNS domain for this cluster. If set, kubelet will
+ // configure all containers to search this domain in addition to the
+ // host's search domains.
+ ClusterDomain string `json:"clusterDomain"`
+	// masterServiceNamespace is the namespace from which the kubernetes
+ // master services should be injected into pods.
+ MasterServiceNamespace string `json:"masterServiceNamespace"`
+ // clusterDNS is the IP address for a cluster DNS server. If set, kubelet
+ // will configure all containers to use this for DNS resolution in
+ // addition to the host's DNS servers
+ ClusterDNS string `json:"clusterDNS"`
+ // streamingConnectionIdleTimeout is the maximum time a streaming connection
+ // can be idle before the connection is automatically closed.
+ StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"`
+ // nodeStatusUpdateFrequency is the frequency that kubelet posts node
+ // status to master. Note: be cautious when changing the constant, it
+ // must work with nodeMonitorGracePeriod in nodecontroller.
+ NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"`
+	// imageMinimumGCAge is the minimum age for an unused image before it is
+	// garbage collected.
+ ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"`
+ // imageGCHighThresholdPercent is the percent of disk usage after which
+ // image garbage collection is always run.
+ ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"`
+ // imageGCLowThresholdPercent is the percent of disk usage before which
+ // image garbage collection is never run. Lowest disk usage to garbage
+ // collect to.
+ ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"`
+ // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
+ // maintain. When disk space falls below this threshold, new pods would
+ // be rejected.
+ LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"`
+ // How frequently to calculate and cache volume disk usage for all pods
+ VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
+ // networkPluginName is the name of the network plugin to be invoked for
+ // various events in kubelet/pod lifecycle
+ NetworkPluginName string `json:"networkPluginName"`
+ // networkPluginDir is the full path of the directory in which to search
+ // for network plugins
+ NetworkPluginDir string `json:"networkPluginDir"`
+ // volumePluginDir is the full path of the directory in which to search
+ // for additional third party volume plugins
+ VolumePluginDir string `json:"volumePluginDir"`
+ // cloudProvider is the provider for cloud services.
+ CloudProvider string `json:"cloudProvider,omitempty"`
+ // cloudConfigFile is the path to the cloud provider configuration file.
+ CloudConfigFile string `json:"cloudConfigFile,omitempty"`
+ // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
+ KubeletCgroups string `json:"kubeletCgroups,omitempty"`
+ // Cgroups that container runtime is expected to be isolated in.
+ RuntimeCgroups string `json:"runtimeCgroups,omitempty"`
+ // SystemCgroups is absolute name of cgroups in which to place
+ // all non-kernel processes that are not already in a container. Empty
+ // for no container. Rolling back the flag requires a reboot.
+ SystemCgroups string `json:"systemContainer,omitempty"`
+ // cgroupRoot is the root cgroup to use for pods. This is handled by the
+ // container runtime on a best effort basis.
+ CgroupRoot string `json:"cgroupRoot,omitempty"`
+ // containerRuntime is the container runtime to use.
+ ContainerRuntime string `json:"containerRuntime"`
+ // runtimeRequestTimeout is the timeout for all runtime requests except long running
+ // requests - pull, logs, exec and attach.
+ RuntimeRequestTimeout unversioned.Duration `json:"runtimeRequestTimeout,omitempty"`
+ // rktPath is the path of rkt binary. Leave empty to use the first rkt in
+ // $PATH.
+ RktPath string `json:"rktPath,omitempty"`
+ // rktApiEndpoint is the endpoint of the rkt API service to communicate with.
+ RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"`
+ // rktStage1Image is the image to use as stage1. Local paths and
+ // http/https URLs are supported.
+ RktStage1Image string `json:"rktStage1Image,omitempty"`
+	// lockFilePath is the path that kubelet will use as a lock file.
+ // It uses this file as a lock to synchronize with other kubelet processes
+ // that may be running.
+ LockFilePath string `json:"lockFilePath"`
+ // ExitOnLockContention is a flag that signifies to the kubelet that it is running
+ // in "bootstrap" mode. This requires that 'LockFilePath' has been set.
+ // This will cause the kubelet to listen to inotify events on the lock file,
+ // releasing it and exiting when another process tries to open that file.
+ ExitOnLockContention bool `json:"exitOnLockContention"`
+	// configureCBR0 enables the kubelet to configure cbr0 based on
+ // Node.Spec.PodCIDR.
+ ConfigureCBR0 bool `json:"configureCbr0"`
+ // How should the kubelet configure the container bridge for hairpin packets.
+ // Setting this flag allows endpoints in a Service to loadbalance back to
+ // themselves if they should try to access their own Service. Values:
+ // "promiscuous-bridge": make the container bridge promiscuous.
+ // "hairpin-veth": set the hairpin flag on container veth interfaces.
+ // "none": do nothing.
+ // Setting --configure-cbr0 to false implies that to achieve hairpin NAT
+ // one must set --hairpin-mode=veth-flag, because bridge assumes the
+ // existence of a container bridge named cbr0.
+ HairpinMode string `json:"hairpinMode"`
+	// The node has a babysitter process monitoring docker and the kubelet.
+ BabysitDaemons bool `json:"babysitDaemons"`
+ // maxPods is the number of pods that can run on this Kubelet.
+ MaxPods int32 `json:"maxPods"`
+ // nvidiaGPUs is the number of NVIDIA GPU devices on this node.
+ NvidiaGPUs int32 `json:"nvidiaGPUs"`
+ // dockerExecHandlerName is the handler to use when executing a command
+ // in a container. Valid values are 'native' and 'nsenter'. Defaults to
+ // 'native'.
+ DockerExecHandlerName string `json:"dockerExecHandlerName"`
+ // The CIDR to use for pod IP addresses, only used in standalone mode.
+ // In cluster mode, this is obtained from the master.
+ PodCIDR string `json:"podCIDR"`
+	// resolverConfig is the resolver configuration file used as the basis
+	// for the container DNS resolution configuration.
+ ResolverConfig string `json:"resolvConf"`
+	// cpuCFSQuota enables CPU CFS quota enforcement for containers that
+	// specify CPU limits.
+ CPUCFSQuota bool `json:"cpuCFSQuota"`
+ // containerized should be set to true if kubelet is running in a container.
+ Containerized bool `json:"containerized"`
+	// maxOpenFiles is the maximum number of files that can be opened by the Kubelet process.
+ MaxOpenFiles uint64 `json:"maxOpenFiles"`
+	// reconcileCIDR tells the kubelet to reconcile the node CIDR with the CIDR specified by the
+ // API server. No-op if register-node or configure-cbr0 is false.
+ ReconcileCIDR bool `json:"reconcileCIDR"`
+ // registerSchedulable tells the kubelet to register the node as
+ // schedulable. No-op if register-node is false.
+ RegisterSchedulable bool `json:"registerSchedulable"`
+ // contentType is contentType of requests sent to apiserver.
+ ContentType string `json:"contentType"`
+ // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver
+ KubeAPIQPS float32 `json:"kubeAPIQPS"`
+ // kubeAPIBurst is the burst to allow while talking with kubernetes
+ // apiserver
+ KubeAPIBurst int32 `json:"kubeAPIBurst"`
+ // serializeImagePulls when enabled, tells the Kubelet to pull images one
+ // at a time. We recommend *not* changing the default value on nodes that
+ // run docker daemon with version < 1.9 or an Aufs storage backend.
+ // Issue #10959 has more details.
+ SerializeImagePulls bool `json:"serializeImagePulls"`
+ // experimentalFlannelOverlay enables experimental support for starting the
+ // kubelet with the default overlay network (flannel). Assumes flanneld
+ // is already running in client mode.
+ ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
+ // outOfDiskTransitionFrequency is duration for which the kubelet has to
+ // wait before transitioning out of out-of-disk node condition status.
+ OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`
+ // nodeIP is IP address of the node. If set, kubelet will use this IP
+ // address for the node.
+ NodeIP string `json:"nodeIP,omitempty"`
+ // nodeLabels to add when registering the node in the cluster.
+ NodeLabels map[string]string `json:"nodeLabels"`
+ // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.
+ NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"`
+ // enable gathering custom metrics.
+ EnableCustomMetrics bool `json:"enableCustomMetrics"`
+ // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'.
+ EvictionHard string `json:"evictionHard,omitempty"`
+ // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'.
+ EvictionSoft string `json:"evictionSoft,omitempty"`
+	// Comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'.
+ EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"`
+ // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+ EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
+ // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
+ // Maximum number of pods per core. Cannot exceed MaxPods
+ PodsPerCore int32 `json:"podsPerCore"`
+ // enableControllerAttachDetach enables the Attach/Detach controller to
+ // manage attachment/detachment of volumes scheduled to this node, and
+ // disables kubelet from executing any attach/detach operations
+ EnableControllerAttachDetach bool `json:"enableControllerAttachDetach"`
+}
+
+type KubeSchedulerConfiguration struct {
+ unversioned.TypeMeta
+
+ // port is the port that the scheduler's http service runs on.
+ Port int32 `json:"port"`
+ // address is the IP address to serve on.
+ Address string `json:"address"`
+ // algorithmProvider is the scheduling algorithm provider to use.
+ AlgorithmProvider string `json:"algorithmProvider"`
+ // policyConfigFile is the filepath to the scheduler policy configuration.
+ PolicyConfigFile string `json:"policyConfigFile"`
+ // enableProfiling enables profiling via web interface.
+ EnableProfiling bool `json:"enableProfiling"`
+ // contentType is contentType of requests sent to apiserver.
+ ContentType string `json:"contentType"`
+ // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
+ KubeAPIQPS float32 `json:"kubeAPIQPS"`
+ // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
+ KubeAPIBurst int32 `json:"kubeAPIBurst"`
+ // schedulerName is name of the scheduler, used to select which pods
+ // will be processed by this scheduler, based on pod's annotation with
+ // key 'scheduler.alpha.kubernetes.io/name'.
+ SchedulerName string `json:"schedulerName"`
+ // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+ // corresponding to every RequiredDuringScheduling affinity rule.
+ // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
+ HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
+ // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
+ FailureDomains string `json:"failureDomains"`
+ // leaderElection defines the configuration of leader election client.
+ LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
+}
+
+// LeaderElectionConfiguration defines the configuration of leader election
+// clients for components that can run with leader election enabled.
+type LeaderElectionConfiguration struct {
+ // leaderElect enables a leader election client to gain leadership
+ // before executing the main loop. Enable this when running replicated
+ // components for high availability.
+ LeaderElect bool `json:"leaderElect"`
+ // leaseDuration is the duration that non-leader candidates will wait
+ // after observing a leadership renewal until attempting to acquire
+ // leadership of a led but unrenewed leader slot. This is effectively the
+ // maximum duration that a leader can be stopped before it is replaced
+ // by another candidate. This is only applicable if leader election is
+ // enabled.
+ LeaseDuration unversioned.Duration `json:"leaseDuration"`
+ // renewDeadline is the interval between attempts by the acting master to
+ // renew a leadership slot before it stops leading. This must be less
+ // than or equal to the lease duration. This is only applicable if leader
+ // election is enabled.
+ RenewDeadline unversioned.Duration `json:"renewDeadline"`
+ // retryPeriod is the duration the clients should wait between attempting
+ // acquisition and renewal of a leadership. This is only applicable if
+ // leader election is enabled.
+ RetryPeriod unversioned.Duration `json:"retryPeriod"`
+}
+
+type KubeControllerManagerConfiguration struct {
+ unversioned.TypeMeta
+
+ // port is the port that the controller-manager's http service runs on.
+ Port int32 `json:"port"`
+ // address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
+ Address string `json:"address"`
+ // cloudProvider is the provider for cloud services.
+ CloudProvider string `json:"cloudProvider"`
+ // cloudConfigFile is the path to the cloud provider configuration file.
+ CloudConfigFile string `json:"cloudConfigFile"`
+ // concurrentEndpointSyncs is the number of endpoint syncing operations
+ // that will be done concurrently. Larger number = faster endpoint updating,
+ // but more CPU (and network) load.
+ ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
+ // concurrentRSSyncs is the number of replica sets that are allowed to sync
+ // concurrently. Larger number = more responsive replica management, but more
+ // CPU (and network) load.
+ ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
+ // concurrentRCSyncs is the number of replication controllers that are
+ // allowed to sync concurrently. Larger number = more responsive replica
+ // management, but more CPU (and network) load.
+ ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
+ // concurrentResourceQuotaSyncs is the number of resource quotas that are
+ // allowed to sync concurrently. Larger number = more responsive quota
+ // management, but more CPU (and network) load.
+ ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
+ // concurrentDeploymentSyncs is the number of deployment objects that are
+ // allowed to sync concurrently. Larger number = more responsive deployments,
+ // but more CPU (and network) load.
+ ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
+ // concurrentDaemonSetSyncs is the number of daemonset objects that are
+ // allowed to sync concurrently. Larger number = more responsive daemonset,
+ // but more CPU (and network) load.
+ ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
+ // concurrentJobSyncs is the number of job objects that are
+ // allowed to sync concurrently. Larger number = more responsive jobs,
+ // but more CPU (and network) load.
+ ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"`
+ // concurrentNamespaceSyncs is the number of namespace objects that are
+ // allowed to sync concurrently.
+ ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
+ // concurrentSATokenSyncs is the number of service account token syncing operations
+ // that will be done concurrently.
+ ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"`
+ // lookupCacheSizeForRC is the size of lookup cache for replication controllers.
+ // Larger number = more responsive replica management, but more MEM load.
+ LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"`
+	// lookupCacheSizeForRS is the size of lookup cache for replica sets.
+ // Larger number = more responsive replica management, but more MEM load.
+ LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"`
+ // lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets.
+ // Larger number = more responsive daemonset, but more MEM load.
+ LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"`
+ // serviceSyncPeriod is the period for syncing services with their external
+ // load balancers.
+ ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"`
+ // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
+ // periods will result in fewer calls to cloud provider, but may delay addition
+ // of new nodes to cluster.
+ NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"`
+ // resourceQuotaSyncPeriod is the period for syncing quota usage status
+ // in the system.
+ ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"`
+ // namespaceSyncPeriod is the period for syncing namespace life-cycle
+ // updates.
+ NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"`
+ // pvClaimBinderSyncPeriod is the period for syncing persistent volumes
+ // and persistent volume claims.
+ PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"`
+ // minResyncPeriod is the resync period in reflectors; will be random between
+ // minResyncPeriod and 2*minResyncPeriod.
+ MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"`
+ // terminatedPodGCThreshold is the number of terminated pods that can exist
+ // before the terminated pod garbage collector starts deleting terminated pods.
+ // If <= 0, the terminated pod garbage collector is disabled.
+ TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
+ // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
+ // pods in horizontal pod autoscaler.
+ HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"`
+ // deploymentControllerSyncPeriod is the period for syncing the deployments.
+ DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"`
+ // podEvictionTimeout is the grace period for deleting pods on failed nodes.
+ PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"`
+ // deletingPodsQps is the number of nodes per second on which pods are deleted in
+ // case of node failure.
+ DeletingPodsQps float32 `json:"deletingPodsQps"`
+	// deletingPodsBurst is the number of nodes on which pods can be deleted in a burst in
+ // case of node failure. For more details look into RateLimiter.
+ DeletingPodsBurst int32 `json:"deletingPodsBurst"`
+	// nodeMonitorGracePeriod is the amount of time which we allow a running node to be
+	// unresponsive before marking it unhealthy. Must be N times more than kubelet's
+ // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
+ // to post node status.
+ NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"`
+ // registerRetryCount is the number of retries for initial node registration.
+ // Retry interval equals node-sync-period.
+ RegisterRetryCount int32 `json:"registerRetryCount"`
+ // nodeStartupGracePeriod is the amount of time which we allow starting a node to
+	// be unresponsive before marking it unhealthy.
+ NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"`
+ // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
+ NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"`
+ // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
+ // used to sign service account tokens.
+ ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
+ // enableProfiling enables profiling via web interface host:port/debug/pprof/
+ EnableProfiling bool `json:"enableProfiling"`
+ // clusterName is the instance prefix for the cluster.
+ ClusterName string `json:"clusterName"`
+ // clusterCIDR is CIDR Range for Pods in cluster.
+ ClusterCIDR string `json:"clusterCIDR"`
+ // serviceCIDR is CIDR Range for Services in cluster.
+ ServiceCIDR string `json:"serviceCIDR"`
+ // NodeCIDRMaskSize is the mask size for node cidr in cluster.
+ NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
+ // allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
+ // ConfigureCloudRoutes is true, to be set on the cloud provider.
+ AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
+ // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
+ // to be configured on the cloud provider.
+ ConfigureCloudRoutes bool `json:"configureCloudRoutes"`
+	// rootCAFile is the root certificate authority that will be included in the service
+	// account's token secret. This must be a valid PEM-encoded CA bundle.
+ RootCAFile string `json:"rootCAFile"`
+ // contentType is contentType of requests sent to apiserver.
+ ContentType string `json:"contentType"`
+ // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
+ KubeAPIQPS float32 `json:"kubeAPIQPS"`
+ // kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
+ KubeAPIBurst int32 `json:"kubeAPIBurst"`
+ // leaderElection defines the configuration of leader election client.
+ LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
+ // volumeConfiguration holds configuration for volume related features.
+ VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
+ // How long to wait between starting controller managers
+ ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"`
+ // enables the generic garbage collector. MUST be synced with the
+ // corresponding flag of the kube-apiserver. WARNING: the generic garbage
+ // collector is an alpha feature.
+ EnableGarbageCollector bool `json:"enableGarbageCollector"`
+}
+
+// VolumeConfiguration contains *all* enumerated flags meant to configure all volume
+// plugins. From this config, the controller-manager binary will create many instances of
+// volume.VolumeConfig, each containing only the configuration needed for that plugin; these
+// are then passed to the appropriate plugin. The ControllerManager binary is the only part
+// of the code which knows what plugins are supported and which flags correspond to each plugin.
+type VolumeConfiguration struct {
+ // enableHostPathProvisioning enables HostPath PV provisioning when running without a
+ // cloud provider. This allows testing and development of provisioning features. HostPath
+ // provisioning is not supported in any way, won't work in a multi-node cluster, and
+ // should not be used for anything other than testing or development.
+ EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"`
+ // enableDynamicProvisioning enables the provisioning of volumes when running within an environment
+ // that supports dynamic provisioning. Defaults to true.
+ EnableDynamicProvisioning bool `json:"enableDynamicProvisioning"`
+ // persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.
+ PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"`
+ // volumePluginDir is the full path of the directory in which the flex
+ // volume plugin should search for additional third party volume plugins
+ FlexVolumePluginDir string `json:"flexVolumePluginDir"`
+}
+
+type PersistentVolumeRecyclerConfiguration struct {
+	// maximumRetry is the number of retries the PV recycler will execute on failure to recycle
+	// a PV.
+ MaximumRetry int32 `json:"maximumRetry"`
+ // minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler
+ // pod.
+ MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"`
+ // podTemplateFilePathNFS is the file path to a pod definition used as a template for
+ // NFS persistent volume recycling
+ PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"`
+ // incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds
+ // for an NFS scrubber pod.
+ IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"`
+ // podTemplateFilePathHostPath is the file path to a pod definition used as a template for
+ // HostPath persistent volume recycling. This is for development and testing only and
+ // will not work in a multi-node cluster.
+ PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"`
+ // minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath
+ // Recycler pod. This is for development and testing only and will not work in a multi-node
+ // cluster.
+ MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"`
+ // incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds
+ // for a HostPath scrubber pod. This is for development and testing only and will not work
+ // in a multi-node cluster.
+ IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"`
+}
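These configuration structs are plain Go structs with json tags, and their unversioned.Duration fields serialize as duration strings such as "30s". A self-contained sketch of that round-trip behavior, using stand-in types (the Duration type below merely mirrors the marshaling of unversioned.Duration, and proxyConfig mimics a few KubeProxyConfiguration fields; neither is the vendored type):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // Duration mirrors the behavior of unversioned.Duration: it wraps time.Duration
    // and serializes as a Go duration string (e.g. "30s") rather than nanoseconds.
    type Duration struct {
    	time.Duration
    }

    func (d Duration) MarshalJSON() ([]byte, error) {
    	return json.Marshal(d.Duration.String())
    }

    func (d *Duration) UnmarshalJSON(b []byte) error {
    	var s string
    	if err := json.Unmarshal(b, &s); err != nil {
    		return err
    	}
    	parsed, err := time.ParseDuration(s)
    	if err != nil {
    		return err
    	}
    	d.Duration = parsed
    	return nil
    }

    // proxyConfig is a stand-in shaped like a few KubeProxyConfiguration fields.
    type proxyConfig struct {
    	BindAddress        string   `json:"bindAddress"`
    	HealthzPort        int32    `json:"healthzPort"`
    	IPTablesSyncPeriod Duration `json:"iptablesSyncPeriodSeconds"`
    }

    func main() {
    	cfg := proxyConfig{
    		BindAddress:        "0.0.0.0",
    		HealthzPort:        10249,
    		IPTablesSyncPeriod: Duration{30 * time.Second},
    	}
    	out, _ := json.Marshal(cfg)
    	// {"bindAddress":"0.0.0.0","healthzPort":10249,"iptablesSyncPeriodSeconds":"30s"}
    	fmt.Println(string(out))
    }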
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go
new file mode 100644
index 0000000..160b0f9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go
@@ -0,0 +1,182 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration,
+ Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration,
+ Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration,
+ Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration,
+ Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration,
+ Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error {
+ SetDefaults_KubeProxyConfiguration(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.BindAddress = in.BindAddress
+ out.ClusterCIDR = in.ClusterCIDR
+ out.HealthzBindAddress = in.HealthzBindAddress
+ out.HealthzPort = in.HealthzPort
+ out.HostnameOverride = in.HostnameOverride
+ out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit
+ out.IPTablesSyncPeriod = in.IPTablesSyncPeriod
+ out.KubeconfigPath = in.KubeconfigPath
+ out.MasqueradeAll = in.MasqueradeAll
+ out.Master = in.Master
+ out.OOMScoreAdj = in.OOMScoreAdj
+ out.Mode = componentconfig.ProxyMode(in.Mode)
+ out.PortRange = in.PortRange
+ out.ResourceContainer = in.ResourceContainer
+ out.UDPIdleTimeout = in.UDPIdleTimeout
+ out.ConntrackMax = in.ConntrackMax
+ out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
+ return nil
+}
+
+func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s)
+}
+
+func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.BindAddress = in.BindAddress
+ out.ClusterCIDR = in.ClusterCIDR
+ out.HealthzBindAddress = in.HealthzBindAddress
+ out.HealthzPort = in.HealthzPort
+ out.HostnameOverride = in.HostnameOverride
+ out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit
+ out.IPTablesSyncPeriod = in.IPTablesSyncPeriod
+ out.KubeconfigPath = in.KubeconfigPath
+ out.MasqueradeAll = in.MasqueradeAll
+ out.Master = in.Master
+ out.OOMScoreAdj = in.OOMScoreAdj
+ out.Mode = ProxyMode(in.Mode)
+ out.PortRange = in.PortRange
+ out.ResourceContainer = in.ResourceContainer
+ out.UDPIdleTimeout = in.UDPIdleTimeout
+ out.ConntrackMax = in.ConntrackMax
+ out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
+ return nil
+}
+
+func Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
+ return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error {
+ SetDefaults_KubeSchedulerConfiguration(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Port = int32(in.Port)
+ out.Address = in.Address
+ out.AlgorithmProvider = in.AlgorithmProvider
+ out.PolicyConfigFile = in.PolicyConfigFile
+ if err := api.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil {
+ return err
+ }
+ out.ContentType = in.ContentType
+ out.KubeAPIQPS = in.KubeAPIQPS
+ out.KubeAPIBurst = int32(in.KubeAPIBurst)
+ out.SchedulerName = in.SchedulerName
+ out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
+ out.FailureDomains = in.FailureDomains
+ if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error {
+ return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s)
+}
+
+func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Port = int(in.Port)
+ out.Address = in.Address
+ out.AlgorithmProvider = in.AlgorithmProvider
+ out.PolicyConfigFile = in.PolicyConfigFile
+ if err := api.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil {
+ return err
+ }
+ out.ContentType = in.ContentType
+ out.KubeAPIQPS = in.KubeAPIQPS
+ out.KubeAPIBurst = int(in.KubeAPIBurst)
+ out.SchedulerName = in.SchedulerName
+ out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
+ out.FailureDomains = in.FailureDomains
+ if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error {
+ return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error {
+ SetDefaults_LeaderElectionConfiguration(in)
+ if err := api.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil {
+ return err
+ }
+ out.LeaseDuration = in.LeaseDuration
+ out.RenewDeadline = in.RenewDeadline
+ out.RetryPeriod = in.RetryPeriod
+ return nil
+}
+
+func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error {
+ return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s)
+}
+
+func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error {
+ if err := api.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil {
+ return err
+ }
+ out.LeaseDuration = in.LeaseDuration
+ out.RenewDeadline = in.RenewDeadline
+ out.RetryPeriod = in.RetryPeriod
+ return nil
+}
+
+func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error {
+ return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s)
+}
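The generated conversion functions above register themselves on api.Scheme in init(), so callers usually convert through the scheme rather than invoking the generated names directly. A sketch of that usage, assuming the vendored import paths in this tree and the two-argument Scheme.Convert of this Kubernetes snapshot:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/api"
    	"k8s.io/kubernetes/pkg/apis/componentconfig"
    	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    )

    func main() {
    	in := &v1alpha1.KubeProxyConfiguration{BindAddress: "0.0.0.0"}
    	out := &componentconfig.KubeProxyConfiguration{}
    	// Dispatches to the generated
    	// Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration,
    	// which first applies SetDefaults_KubeProxyConfiguration to the input.
    	if err := api.Scheme.Convert(in, out); err != nil {
    		fmt.Println("conversion failed:", err)
    		return
    	}
    	fmt.Println(out.BindAddress, out.HealthzPort)
    }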
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go
new file mode 100644
index 0000000..04d9ab0
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go
@@ -0,0 +1,110 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1alpha1_KubeProxyConfiguration,
+ DeepCopy_v1alpha1_KubeSchedulerConfiguration,
+ DeepCopy_v1alpha1_LeaderElectionConfiguration,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1alpha1_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.BindAddress = in.BindAddress
+ out.ClusterCIDR = in.ClusterCIDR
+ out.HealthzBindAddress = in.HealthzBindAddress
+ out.HealthzPort = in.HealthzPort
+ out.HostnameOverride = in.HostnameOverride
+ if in.IPTablesMasqueradeBit != nil {
+ in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.IPTablesMasqueradeBit = nil
+ }
+ out.IPTablesSyncPeriod = in.IPTablesSyncPeriod
+ out.KubeconfigPath = in.KubeconfigPath
+ out.MasqueradeAll = in.MasqueradeAll
+ out.Master = in.Master
+ if in.OOMScoreAdj != nil {
+ in, out := in.OOMScoreAdj, &out.OOMScoreAdj
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.OOMScoreAdj = nil
+ }
+ out.Mode = in.Mode
+ out.PortRange = in.PortRange
+ out.ResourceContainer = in.ResourceContainer
+ out.UDPIdleTimeout = in.UDPIdleTimeout
+ out.ConntrackMax = in.ConntrackMax
+ out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
+ return nil
+}
+
+func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Port = in.Port
+ out.Address = in.Address
+ out.AlgorithmProvider = in.AlgorithmProvider
+ out.PolicyConfigFile = in.PolicyConfigFile
+ if in.EnableProfiling != nil {
+ in, out := in.EnableProfiling, &out.EnableProfiling
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.EnableProfiling = nil
+ }
+ out.ContentType = in.ContentType
+ out.KubeAPIQPS = in.KubeAPIQPS
+ out.KubeAPIBurst = in.KubeAPIBurst
+ out.SchedulerName = in.SchedulerName
+ out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight
+ out.FailureDomains = in.FailureDomains
+ if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error {
+ if in.LeaderElect != nil {
+ in, out := in.LeaderElect, &out.LeaderElect
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.LeaderElect = nil
+ }
+ out.LeaseDuration = in.LeaseDuration
+ out.RenewDeadline = in.RenewDeadline
+ out.RetryPeriod = in.RetryPeriod
+ return nil
+}
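The generated deep-copy helpers above can also be invoked directly: they take the source by value, a destination pointer, and a *conversion.Cloner (which these particular types never consult, since every field is copied explicitly). A sketch assuming the vendored import paths:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    	"k8s.io/kubernetes/pkg/conversion"
    )

    func main() {
    	in := v1alpha1.KubeProxyConfiguration{BindAddress: "10.0.0.1"}
    	var out v1alpha1.KubeProxyConfiguration
    	// The cloner is passed through for fields that would need reflective copying;
    	// KubeProxyConfiguration's fields are all copied directly above.
    	if err := v1alpha1.DeepCopy_v1alpha1_KubeProxyConfiguration(in, &out, conversion.NewCloner()); err != nil {
    		fmt.Println("deep copy failed:", err)
    		return
    	}
    	fmt.Println(out.BindAddress) // 10.0.0.1, independent of the original
    }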
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go
new file mode 100644
index 0000000..b40b3e4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "time"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/kubelet/qos"
+ "k8s.io/kubernetes/pkg/master/ports"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_KubeProxyConfiguration,
+ SetDefaults_KubeSchedulerConfiguration,
+ SetDefaults_LeaderElectionConfiguration,
+ )
+}
+
+func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
+ if obj.BindAddress == "" {
+ obj.BindAddress = "0.0.0.0"
+ }
+ if obj.HealthzPort == 0 {
+ obj.HealthzPort = 10249
+ }
+ if obj.HealthzBindAddress == "" {
+ obj.HealthzBindAddress = "127.0.0.1"
+ }
+ if obj.OOMScoreAdj == nil {
+ temp := int32(qos.KubeProxyOOMScoreAdj)
+ obj.OOMScoreAdj = &temp
+ }
+ if obj.ResourceContainer == "" {
+ obj.ResourceContainer = "/kube-proxy"
+ }
+ if obj.IPTablesSyncPeriod.Duration == 0 {
+ obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second}
+ }
+ zero := unversioned.Duration{}
+ if obj.UDPIdleTimeout == zero {
+ obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond}
+ }
+ if obj.ConntrackMax == 0 {
+ obj.ConntrackMax = 256 * 1024 // 4x default (64k)
+ }
+ if obj.IPTablesMasqueradeBit == nil {
+ temp := int32(14)
+ obj.IPTablesMasqueradeBit = &temp
+ }
+ if obj.ConntrackTCPEstablishedTimeout == zero {
+ obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)
+ }
+}
+
+func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
+ if obj.Port == 0 {
+ obj.Port = ports.SchedulerPort
+ }
+ if obj.Address == "" {
+ obj.Address = "0.0.0.0"
+ }
+ if obj.AlgorithmProvider == "" {
+ obj.AlgorithmProvider = "DefaultProvider"
+ }
+ if obj.ContentType == "" {
+ obj.ContentType = "application/vnd.kubernetes.protobuf"
+ }
+ if obj.KubeAPIQPS == 0 {
+ obj.KubeAPIQPS = 50.0
+ }
+ if obj.KubeAPIBurst == 0 {
+ obj.KubeAPIBurst = 100
+ }
+ if obj.SchedulerName == "" {
+ obj.SchedulerName = api.DefaultSchedulerName
+ }
+ if obj.HardPodAffinitySymmetricWeight == 0 {
+ obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
+ }
+ if obj.FailureDomains == "" {
+ obj.FailureDomains = api.DefaultFailureDomains
+ }
+}
+
+func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
+ zero := unversioned.Duration{}
+ if obj.LeaseDuration == zero {
+ obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second}
+ }
+ if obj.RenewDeadline == zero {
+ obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second}
+ }
+ if obj.RetryPeriod == zero {
+ obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second}
+ }
+}
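The SetDefaults_* functions above are registered as defaulting funcs and are normally applied by the scheme during decoding, but they can also be exercised directly, which makes the defaults easy to verify. A sketch assuming the vendored import path:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    )

    func main() {
    	obj := &v1alpha1.KubeProxyConfiguration{}
    	v1alpha1.SetDefaults_KubeProxyConfiguration(obj)
    	// Empty fields are filled with the defaults shown above.
    	fmt.Println(obj.BindAddress)                 // "0.0.0.0"
    	fmt.Println(obj.HealthzPort)                 // 10249
    	fmt.Println(obj.IPTablesSyncPeriod.Duration) // 30s
    }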
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go
new file mode 100644
index 0000000..621e806
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/componentconfig
+
+package v1alpha1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go
new file mode 100644
index 0000000..17fb52a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "componentconfig"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &KubeProxyConfiguration{},
+ &KubeSchedulerConfiguration{},
+ )
+}
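AddToScheme above wires the v1alpha1 kinds and their defaulting funcs into a runtime.Scheme. A sketch of registering against a fresh scheme, assuming the vendored import paths:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    	"k8s.io/kubernetes/pkg/runtime"
    )

    func main() {
    	scheme := runtime.NewScheme()
    	// Registers KubeProxyConfiguration and KubeSchedulerConfiguration under
    	// componentconfig/v1alpha1 and adds the defaulting funcs from defaults.go.
    	v1alpha1.AddToScheme(scheme)
    	fmt.Println("registered group/version:", v1alpha1.SchemeGroupVersion.String())
    }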
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go
new file mode 100644
index 0000000..817468a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/kubernetes/pkg/api/unversioned"
+
+type KubeProxyConfiguration struct {
+ unversioned.TypeMeta
+
+ // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
+ // for all interfaces)
+ BindAddress string `json:"bindAddress"`
+ // clusterCIDR is the CIDR range of the pods in the cluster. It is used to
+ // bridge traffic coming from outside of the cluster. If not provided,
+ // no off-cluster bridging will be performed.
+ ClusterCIDR string `json:"clusterCIDR"`
+ // healthzBindAddress is the IP address for the health check server to serve on,
+ // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
+ HealthzBindAddress string `json:"healthzBindAddress"`
+ // healthzPort is the port to bind the health check server. Use 0 to disable.
+ HealthzPort int32 `json:"healthzPort"`
+ // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
+ HostnameOverride string `json:"hostnameOverride"`
+ // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
+ // the pure iptables proxy mode. Values must be within the range [0, 31].
+ IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"`
+ // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
+ // '2h22m'). Must be greater than 0.
+ IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
+ // kubeconfigPath is the path to the kubeconfig file with authorization information (the
+ // master location is set by the master flag).
+ KubeconfigPath string `json:"kubeconfigPath"`
+ // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
+ MasqueradeAll bool `json:"masqueradeAll"`
+ // master is the address of the Kubernetes API server (overrides any value in kubeconfig)
+ Master string `json:"master"`
+ // oomScoreAdj is the oom-score-adj value for the kube-proxy process. Values must be within
+ // the range [-1000, 1000]
+ OOMScoreAdj *int32 `json:"oomScoreAdj"`
+ // mode specifies which proxy mode to use.
+ Mode ProxyMode `json:"mode"`
+ // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
+ // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
+ PortRange string `json:"portRange"`
+ // resourceContainer is the absolute name of the resource-only container to create and run
+ // the Kube-proxy in (Default: /kube-proxy).
+ ResourceContainer string `json:"resourceContainer"`
+ // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
+ // Must be greater than 0. Only applicable for proxyMode=userspace.
+ UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
+ // conntrackMax is the maximum number of NAT connections to track (0 to leave as-is).
+ ConntrackMax int32 `json:"conntrackMax"`
+ // conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
+ // (e.g. '250ms', '2s'). Must be greater than 0. Only applicable when proxyMode is 'userspace'.
+ ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
+}
+
+// ProxyMode selects which proxy mode kube-proxy runs in. Currently two modes of proxying are
+// available: 'userspace' (older, stable) or 'iptables' (experimental). If blank, kube-proxy looks
+// at the Node object on the Kubernetes API and respects the 'net.experimental.kubernetes.io/proxy-mode'
+// annotation if provided; otherwise it uses the best-available proxy (currently userspace, but this
+// may change in future versions). If the iptables proxy is selected, regardless of how, and the
+// system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.
+type ProxyMode string
+
+const (
+ ProxyModeUserspace ProxyMode = "userspace"
+ ProxyModeIPTables ProxyMode = "iptables"
+)
+
+type KubeSchedulerConfiguration struct {
+ unversioned.TypeMeta
+
+ // port is the port that the scheduler's http service runs on.
+ Port int `json:"port"`
+ // address is the IP address to serve on.
+ Address string `json:"address"`
+ // algorithmProvider is the scheduling algorithm provider to use.
+ AlgorithmProvider string `json:"algorithmProvider"`
+ // policyConfigFile is the filepath to the scheduler policy configuration.
+ PolicyConfigFile string `json:"policyConfigFile"`
+ // enableProfiling enables profiling via web interface.
+ EnableProfiling *bool `json:"enableProfiling"`
+ // contentType is the content type of requests sent to the apiserver.
+ ContentType string `json:"contentType"`
+ // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
+ KubeAPIQPS float32 `json:"kubeAPIQPS"`
+ // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
+ KubeAPIBurst int `json:"kubeAPIBurst"`
+ // schedulerName is the name of the scheduler, used to select which pods
+ // will be processed by this scheduler, based on pod's annotation with
+ // key 'scheduler.alpha.kubernetes.io/name'.
+ SchedulerName string `json:"schedulerName"`
+ // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+ // corresponding to every RequiredDuringScheduling affinity rule.
+ // HardPodAffinitySymmetricWeight represents the weight of the implicit PreferredDuringScheduling affinity rule, in the range 0-100.
+ HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
+ // failureDomains indicates the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
+ FailureDomains string `json:"failureDomains"`
+ // leaderElection defines the configuration of leader election client.
+ LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
+}
+
+// LeaderElectionConfiguration defines the configuration of leader election
+// clients for components that can run with leader election enabled.
+type LeaderElectionConfiguration struct {
+ // leaderElect enables a leader election client to gain leadership
+ // before executing the main loop. Enable this when running replicated
+ // components for high availability.
+ LeaderElect *bool `json:"leaderElect"`
+ // leaseDuration is the duration that non-leader candidates will wait
+ // after observing a leadership renewal until attempting to acquire
+ // leadership of a led but unrenewed leader slot. This is effectively the
+ // maximum duration that a leader can be stopped before it is replaced
+ // by another candidate. This is only applicable if leader election is
+ // enabled.
+ LeaseDuration unversioned.Duration `json:"leaseDuration"`
+ // renewDeadline is the interval between attempts by the acting master to
+ // renew a leadership slot before it stops leading. This must be less
+ // than or equal to the lease duration. This is only applicable if leader
+ // election is enabled.
+ RenewDeadline unversioned.Duration `json:"renewDeadline"`
+ // retryPeriod is the duration the clients should wait between attempting
+ // acquisition and renewal of a leadership. This is only applicable if
+ // leader election is enabled.
+ RetryPeriod unversioned.Duration `json:"retryPeriod"`
+}
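
A short, hypothetical construction of the proxy configuration defined above. The field values are illustrative assumptions only, not recommended defaults, and the example assumes the vendored unversioned.Duration wrapper around time.Duration:

    package example

    import (
        "time"

        "k8s.io/kubernetes/pkg/api/unversioned"
        componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    )

    // exampleProxyConfig returns an illustrative KubeProxyConfiguration literal.
    func exampleProxyConfig() componentconfigv1alpha1.KubeProxyConfiguration {
        oomScoreAdj := int32(-999)
        return componentconfigv1alpha1.KubeProxyConfiguration{
            BindAddress:        "0.0.0.0",
            HealthzBindAddress: "127.0.0.1",
            HealthzPort:        10249,
            Mode:               componentconfigv1alpha1.ProxyModeIPTables,
            OOMScoreAdj:        &oomScoreAdj,
            // Duration fields wrap time.Duration in this API vintage.
            IPTablesSyncPeriod: unversioned.Duration{Duration: 30 * time.Second},
            UDPIdleTimeout:     unversioned.Duration{Duration: 250 * time.Millisecond},
        }
    }
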
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go
new file mode 100644
index 0000000..d7bbf3b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go
@@ -0,0 +1,859 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package extensions
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ intstr "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_extensions_APIVersion,
+ DeepCopy_extensions_CustomMetricCurrentStatus,
+ DeepCopy_extensions_CustomMetricCurrentStatusList,
+ DeepCopy_extensions_CustomMetricTarget,
+ DeepCopy_extensions_CustomMetricTargetList,
+ DeepCopy_extensions_DaemonSet,
+ DeepCopy_extensions_DaemonSetList,
+ DeepCopy_extensions_DaemonSetSpec,
+ DeepCopy_extensions_DaemonSetStatus,
+ DeepCopy_extensions_Deployment,
+ DeepCopy_extensions_DeploymentList,
+ DeepCopy_extensions_DeploymentRollback,
+ DeepCopy_extensions_DeploymentSpec,
+ DeepCopy_extensions_DeploymentStatus,
+ DeepCopy_extensions_DeploymentStrategy,
+ DeepCopy_extensions_FSGroupStrategyOptions,
+ DeepCopy_extensions_HTTPIngressPath,
+ DeepCopy_extensions_HTTPIngressRuleValue,
+ DeepCopy_extensions_HostPortRange,
+ DeepCopy_extensions_IDRange,
+ DeepCopy_extensions_Ingress,
+ DeepCopy_extensions_IngressBackend,
+ DeepCopy_extensions_IngressList,
+ DeepCopy_extensions_IngressRule,
+ DeepCopy_extensions_IngressRuleValue,
+ DeepCopy_extensions_IngressSpec,
+ DeepCopy_extensions_IngressStatus,
+ DeepCopy_extensions_IngressTLS,
+ DeepCopy_extensions_NetworkPolicy,
+ DeepCopy_extensions_NetworkPolicyIngressRule,
+ DeepCopy_extensions_NetworkPolicyList,
+ DeepCopy_extensions_NetworkPolicyPeer,
+ DeepCopy_extensions_NetworkPolicyPort,
+ DeepCopy_extensions_NetworkPolicySpec,
+ DeepCopy_extensions_PodSecurityPolicy,
+ DeepCopy_extensions_PodSecurityPolicyList,
+ DeepCopy_extensions_PodSecurityPolicySpec,
+ DeepCopy_extensions_ReplicaSet,
+ DeepCopy_extensions_ReplicaSetList,
+ DeepCopy_extensions_ReplicaSetSpec,
+ DeepCopy_extensions_ReplicaSetStatus,
+ DeepCopy_extensions_ReplicationControllerDummy,
+ DeepCopy_extensions_RollbackConfig,
+ DeepCopy_extensions_RollingUpdateDeployment,
+ DeepCopy_extensions_RunAsUserStrategyOptions,
+ DeepCopy_extensions_SELinuxStrategyOptions,
+ DeepCopy_extensions_Scale,
+ DeepCopy_extensions_ScaleSpec,
+ DeepCopy_extensions_ScaleStatus,
+ DeepCopy_extensions_SupplementalGroupsStrategyOptions,
+ DeepCopy_extensions_ThirdPartyResource,
+ DeepCopy_extensions_ThirdPartyResourceData,
+ DeepCopy_extensions_ThirdPartyResourceDataList,
+ DeepCopy_extensions_ThirdPartyResourceList,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_extensions_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
+ out.Name = in.Name
+ return nil
+}
+
+func DeepCopy_extensions_CustomMetricCurrentStatus(in CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.CurrentValue = in.CurrentValue.DeepCopy()
+ return nil
+}
+
+func DeepCopy_extensions_CustomMetricCurrentStatusList(in CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, c *conversion.Cloner) error {
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]CustomMetricCurrentStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_CustomMetricCurrentStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_CustomMetricTarget(in CustomMetricTarget, out *CustomMetricTarget, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.TargetValue = in.TargetValue.DeepCopy()
+ return nil
+}
+
+func DeepCopy_extensions_CustomMetricTargetList(in CustomMetricTargetList, out *CustomMetricTargetList, c *conversion.Cloner) error {
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]CustomMetricTarget, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_CustomMetricTarget(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_extensions_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]DaemonSet, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_DaemonSet(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
+ out.CurrentNumberScheduled = in.CurrentNumberScheduled
+ out.NumberMisscheduled = in.NumberMisscheduled
+ out.DesiredNumberScheduled = in.DesiredNumberScheduled
+ return nil
+}
+
+func DeepCopy_extensions_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_extensions_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Deployment, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_Deployment(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Name = in.Name
+ if in.UpdatedAnnotations != nil {
+ in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.UpdatedAnnotations = nil
+ }
+ out.RollbackTo = in.RollbackTo
+ return nil
+}
+
+func DeepCopy_extensions_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
+ return err
+ }
+ out.MinReadySeconds = in.MinReadySeconds
+ if in.RevisionHistoryLimit != nil {
+ in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.RevisionHistoryLimit = nil
+ }
+ out.Paused = in.Paused
+ if in.RollbackTo != nil {
+ in, out := in.RollbackTo, &out.RollbackTo
+ *out = new(RollbackConfig)
+ **out = *in
+ } else {
+ out.RollbackTo = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Replicas = in.Replicas
+ out.UpdatedReplicas = in.UpdatedReplicas
+ out.AvailableReplicas = in.AvailableReplicas
+ out.UnavailableReplicas = in.UnavailableReplicas
+ return nil
+}
+
+func DeepCopy_extensions_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
+ out.Type = in.Type
+ if in.RollingUpdate != nil {
+ in, out := in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDeployment)
+ **out = *in
+ } else {
+ out.RollingUpdate = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_FSGroupStrategyOptions(in FSGroupStrategyOptions, out *FSGroupStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.Ranges != nil {
+ in, out := in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error {
+ out.Path = in.Path
+ out.Backend = in.Backend
+ return nil
+}
+
+func DeepCopy_extensions_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error {
+ if in.Paths != nil {
+ in, out := in.Paths, &out.Paths
+ *out = make([]HTTPIngressPath, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Paths = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error {
+ out.Min = in.Min
+ out.Max = in.Max
+ return nil
+}
+
+func DeepCopy_extensions_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error {
+ out.Min = in.Min
+ out.Max = in.Max
+ return nil
+}
+
+func DeepCopy_extensions_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_IngressSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_IngressStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error {
+ out.ServiceName = in.ServiceName
+ out.ServicePort = in.ServicePort
+ return nil
+}
+
+func DeepCopy_extensions_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Ingress, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_Ingress(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error {
+ out.Host = in.Host
+ if err := DeepCopy_extensions_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error {
+ if in.HTTP != nil {
+ in, out := in.HTTP, &out.HTTP
+ *out = new(HTTPIngressRuleValue)
+ if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.HTTP = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error {
+ if in.Backend != nil {
+ in, out := in.Backend, &out.Backend
+ *out = new(IngressBackend)
+ **out = *in
+ } else {
+ out.Backend = nil
+ }
+ if in.TLS != nil {
+ in, out := in.TLS, &out.TLS
+ *out = make([]IngressTLS, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_IngressTLS(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.TLS = nil
+ }
+ if in.Rules != nil {
+ in, out := in.Rules, &out.Rules
+ *out = make([]IngressRule, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_IngressRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error {
+ if err := api.DeepCopy_api_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error {
+ if in.Hosts != nil {
+ in, out := in.Hosts, &out.Hosts
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Hosts = nil
+ }
+ out.SecretName = in.SecretName
+ return nil
+}
+
+func DeepCopy_extensions_NetworkPolicy(in NetworkPolicy, out *NetworkPolicy, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_NetworkPolicySpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_NetworkPolicyIngressRule(in NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, c *conversion.Cloner) error {
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]NetworkPolicyPort, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_NetworkPolicyPort(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.From != nil {
+ in, out := in.From, &out.From
+ *out = make([]NetworkPolicyPeer, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_NetworkPolicyPeer(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.From = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_NetworkPolicyList(in NetworkPolicyList, out *NetworkPolicyList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]NetworkPolicy, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_NetworkPolicy(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_NetworkPolicyPeer(in NetworkPolicyPeer, out *NetworkPolicyPeer, c *conversion.Cloner) error {
+ if in.PodSelector != nil {
+ in, out := in.PodSelector, &out.PodSelector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PodSelector = nil
+ }
+ if in.NamespaceSelector != nil {
+ in, out := in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.NamespaceSelector = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_NetworkPolicyPort(in NetworkPolicyPort, out *NetworkPolicyPort, c *conversion.Cloner) error {
+ if in.Protocol != nil {
+ in, out := in.Protocol, &out.Protocol
+ *out = new(api.Protocol)
+ **out = *in
+ } else {
+ out.Protocol = nil
+ }
+ if in.Port != nil {
+ in, out := in.Port, &out.Port
+ *out = new(intstr.IntOrString)
+ **out = *in
+ } else {
+ out.Port = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_NetworkPolicySpec(in NetworkPolicySpec, out *NetworkPolicySpec, c *conversion.Cloner) error {
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(in.PodSelector, &out.PodSelector, c); err != nil {
+ return err
+ }
+ if in.Ingress != nil {
+ in, out := in.Ingress, &out.Ingress
+ *out = make([]NetworkPolicyIngressRule, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_NetworkPolicyIngressRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PodSecurityPolicy, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_PodSecurityPolicy(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error {
+ out.Privileged = in.Privileged
+ if in.DefaultAddCapabilities != nil {
+ in, out := in.DefaultAddCapabilities, &out.DefaultAddCapabilities
+ *out = make([]api.Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.DefaultAddCapabilities = nil
+ }
+ if in.RequiredDropCapabilities != nil {
+ in, out := in.RequiredDropCapabilities, &out.RequiredDropCapabilities
+ *out = make([]api.Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.RequiredDropCapabilities = nil
+ }
+ if in.AllowedCapabilities != nil {
+ in, out := in.AllowedCapabilities, &out.AllowedCapabilities
+ *out = make([]api.Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AllowedCapabilities = nil
+ }
+ if in.Volumes != nil {
+ in, out := in.Volumes, &out.Volumes
+ *out = make([]FSType, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Volumes = nil
+ }
+ out.HostNetwork = in.HostNetwork
+ if in.HostPorts != nil {
+ in, out := in.HostPorts, &out.HostPorts
+ *out = make([]HostPortRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.HostPorts = nil
+ }
+ out.HostPID = in.HostPID
+ out.HostIPC = in.HostIPC
+ if err := DeepCopy_extensions_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(in.SupplementalGroups, &out.SupplementalGroups, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_FSGroupStrategyOptions(in.FSGroup, &out.FSGroup, c); err != nil {
+ return err
+ }
+ out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem
+ return nil
+}
+
+func DeepCopy_extensions_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_extensions_ReplicaSetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_extensions_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ReplicaSet, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_ReplicaSet(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func DeepCopy_extensions_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ return nil
+}
+
+func DeepCopy_extensions_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error {
+ out.Revision = in.Revision
+ return nil
+}
+
+func DeepCopy_extensions_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
+ out.MaxUnavailable = in.MaxUnavailable
+ out.MaxSurge = in.MaxSurge
+ return nil
+}
+
+func DeepCopy_extensions_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.Ranges != nil {
+ in, out := in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.SELinuxOptions != nil {
+ in, out := in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(api.SELinuxOptions)
+ **out = *in
+ } else {
+ out.SELinuxOptions = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Spec = in.Spec
+ if err := DeepCopy_extensions_ScaleStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func DeepCopy_extensions_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.Ranges != nil {
+ in, out := in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Description = in.Description
+ if in.Versions != nil {
+ in, out := in.Versions, &out.Versions
+ *out = make([]APIVersion, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Versions = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Data = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ThirdPartyResourceData, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_ThirdPartyResourceData(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_extensions_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ThirdPartyResource, len(in))
+ for i := range in {
+ if err := DeepCopy_extensions_ThirdPartyResource(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
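
The generated functions above are normally invoked through the Scheme, but they can also be called directly. A hypothetical wrapper follows; the Cloner construction via conversion.NewCloner is an assumption about the vendored conversion package:

    package example

    import (
        "k8s.io/kubernetes/pkg/apis/extensions"
        "k8s.io/kubernetes/pkg/conversion"
    )

    // copyReplicaSet returns a deep copy of in, or an error if the generated
    // copy function fails.
    func copyReplicaSet(in extensions.ReplicaSet) (extensions.ReplicaSet, error) {
        var out extensions.ReplicaSet
        if err := extensions.DeepCopy_extensions_ReplicaSet(in, &out, conversion.NewCloner()); err != nil {
            return extensions.ReplicaSet{}, err
        }
        return out, nil
    }
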
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go
new file mode 100644
index 0000000..2bbb71d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package extensions
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go
new file mode 100644
index 0000000..1279dfb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the experimental API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/extensions"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", extensions.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString(
+ "PodSecurityPolicy",
+ "ThirdPartyResource",
+ )
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default ObjectConvertor and MetadataAccessor for a given
+// group version, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1beta1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(extensions.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ extensions.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1beta1.SchemeGroupVersion:
+ v1beta1.AddToScheme(api.Scheme)
+ }
+ }
+}
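
Because all of the registration above happens in init(), consumers typically pull this package in with a blank import. A minimal sketch follows; the smoke-test body is purely illustrative:

    package main

    import (
        "fmt"

        // Imported for side effects only: init() registers the extensions group
        // (internal plus enabled external versions) with api.Scheme.
        _ "k8s.io/kubernetes/pkg/apis/extensions/install"

        "k8s.io/kubernetes/pkg/apis/extensions"
    )

    func main() {
        // After the blank import the group is known to the API machinery; print
        // the internal group/version as a simple smoke test.
        fmt.Println(extensions.SchemeGroupVersion.String())
    }
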
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go
new file mode 100644
index 0000000..48ba33d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package extensions
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "extensions"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group-qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group-qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ // TODO this gets cleaned up when the types are fixed
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Deployment{},
+ &DeploymentList{},
+ &DeploymentRollback{},
+ &autoscaling.HorizontalPodAutoscaler{},
+ &autoscaling.HorizontalPodAutoscalerList{},
+ &batch.Job{},
+ &batch.JobList{},
+ &batch.JobTemplate{},
+ &ReplicationControllerDummy{},
+ &Scale{},
+ &ThirdPartyResource{},
+ &ThirdPartyResourceList{},
+ &DaemonSetList{},
+ &DaemonSet{},
+ &ThirdPartyResourceData{},
+ &ThirdPartyResourceDataList{},
+ &Ingress{},
+ &IngressList{},
+ &api.ListOptions{},
+ &ReplicaSet{},
+ &ReplicaSetList{},
+ &api.ExportOptions{},
+ &PodSecurityPolicy{},
+ &PodSecurityPolicyList{},
+ &NetworkPolicy{},
+ &NetworkPolicyList{},
+ )
+}
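
A brief, assumed usage of the Kind and Resource helpers defined above (the printed formatting comes from the unversioned types and is not asserted here):

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apis/extensions"
    )

    func main() {
        // Both helpers qualify a bare name with the "extensions" group.
        fmt.Println(extensions.Kind("Deployment"))      // group-qualified kind
        fmt.Println(extensions.Resource("deployments")) // group-qualified resource
    }
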
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go
new file mode 100644
index 0000000..fd98234
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go
@@ -0,0 +1,17991 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package extensions
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_api "k8s.io/kubernetes/pkg/api"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_api.ObjectMeta
+ var v1 pkg4_resource.Quantity
+ var v2 pkg1_unversioned.LabelSelector
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Selector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Kind != ""
+ yyq2[1] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.TargetValue
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.TargetValue
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.TargetValue = pkg4_resource.Quantity{}
+ } else {
+ yyv5 := &x.TargetValue
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetValue = pkg4_resource.Quantity{}
+ } else {
+ yyv9 := &x.TargetValue
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv7 := &x.Items
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.CurrentValue
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.CurrentValue
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.CurrentValue = pkg4_resource.Quantity{}
+ } else {
+ yyv5 := &x.CurrentValue
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentValue = pkg4_resource.Quantity{}
+ } else {
+ yyv9 := &x.CurrentValue
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv7 := &x.Items
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = x.Description != ""
+ yyq2[2] = len(x.Versions) != 0
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Description))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("description"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Description))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Versions == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.encSliceAPIVersion(([]APIVersion)(x.Versions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("versions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Versions == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceAPIVersion(([]APIVersion)(x.Versions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "description":
+ if r.TryDecodeAsNil() {
+ x.Description = ""
+ } else {
+ x.Description = string(r.DecodeString())
+ }
+ case "versions":
+ if r.TryDecodeAsNil() {
+ x.Versions = nil
+ } else {
+ yyv6 := &x.Versions
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceAPIVersion((*[]APIVersion)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Description = ""
+ } else {
+ x.Description = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Versions = nil
+ } else {
+ yyv13 := &x.Versions
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceAPIVersion((*[]APIVersion)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Data) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv5 := &x.Data
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv11 := &x.Data
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = DeploymentSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = DeploymentStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = DeploymentSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = DeploymentStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
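+ // Encoder for DeploymentSpec: template and strategy are always written; replicas,
+ // selector, minReadySeconds, revisionHistoryLimit, paused and rollbackTo are
+ // written only when they differ from their zero values.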
+func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != 0
+ yyq2[1] = x.Selector != nil
+ yyq2[3] = true
+ yyq2[4] = x.MinReadySeconds != 0
+ yyq2[5] = x.RevisionHistoryLimit != nil
+ yyq2[6] = x.Paused != false
+ yyq2[7] = x.RollbackTo != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy10 := &x.Template
+ yy10.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Template
+ yy12.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.Strategy
+ yy15.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("strategy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.Strategy
+ yy17.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinReadySeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinReadySeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.RevisionHistoryLimit == nil {
+ r.EncodeNil()
+ } else {
+ yy23 := *x.RevisionHistoryLimit
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeInt(int64(yy23))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RevisionHistoryLimit == nil {
+ r.EncodeNil()
+ } else {
+ yy25 := *x.RevisionHistoryLimit
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeInt(int64(yy25))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Paused))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("paused"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Paused))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.RollbackTo == nil {
+ r.EncodeNil()
+ } else {
+ x.RollbackTo.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rollbackTo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RollbackTo == nil {
+ r.EncodeNil()
+ } else {
+ x.RollbackTo.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv7 := &x.Template
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "strategy":
+ if r.TryDecodeAsNil() {
+ x.Strategy = DeploymentStrategy{}
+ } else {
+ yyv8 := &x.Strategy
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "minReadySeconds":
+ if r.TryDecodeAsNil() {
+ x.MinReadySeconds = 0
+ } else {
+ x.MinReadySeconds = int32(r.DecodeInt(32))
+ }
+ case "revisionHistoryLimit":
+ if r.TryDecodeAsNil() {
+ if x.RevisionHistoryLimit != nil {
+ x.RevisionHistoryLimit = nil
+ }
+ } else {
+ if x.RevisionHistoryLimit == nil {
+ x.RevisionHistoryLimit = new(int32)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "paused":
+ if r.TryDecodeAsNil() {
+ x.Paused = false
+ } else {
+ x.Paused = bool(r.DecodeBool())
+ }
+ case "rollbackTo":
+ if r.TryDecodeAsNil() {
+ if x.RollbackTo != nil {
+ x.RollbackTo = nil
+ }
+ } else {
+ if x.RollbackTo == nil {
+ x.RollbackTo = new(RollbackConfig)
+ }
+ x.RollbackTo.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv18 := &x.Template
+ yyv18.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Strategy = DeploymentStrategy{}
+ } else {
+ yyv19 := &x.Strategy
+ yyv19.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinReadySeconds = 0
+ } else {
+ x.MinReadySeconds = int32(r.DecodeInt(32))
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RevisionHistoryLimit != nil {
+ x.RevisionHistoryLimit = nil
+ }
+ } else {
+ if x.RevisionHistoryLimit == nil {
+ x.RevisionHistoryLimit = new(int32)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Paused = false
+ } else {
+ x.Paused = bool(r.DecodeBool())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RollbackTo != nil {
+ x.RollbackTo = nil
+ }
+ } else {
+ if x.RollbackTo == nil {
+ x.RollbackTo = new(RollbackConfig)
+ }
+ x.RollbackTo.CodecDecodeSelf(d)
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
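+ // Encoder for DeploymentRollback: name and rollbackTo are always written;
+ // updatedAnnotations, kind and apiVersion only when non-empty.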
+func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.UpdatedAnnotations) != 0
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.UpdatedAnnotations == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.UpdatedAnnotations == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy10 := &x.RollbackTo
+ yy10.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rollbackTo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.RollbackTo
+ yy12.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "updatedAnnotations":
+ if r.TryDecodeAsNil() {
+ x.UpdatedAnnotations = nil
+ } else {
+ yyv5 := &x.UpdatedAnnotations
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv5, false, d)
+ }
+ }
+ case "rollbackTo":
+ if r.TryDecodeAsNil() {
+ x.RollbackTo = RollbackConfig{}
+ } else {
+ yyv7 := &x.RollbackTo
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UpdatedAnnotations = nil
+ } else {
+ yyv12 := &x.UpdatedAnnotations
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv12, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RollbackTo = RollbackConfig{}
+ } else {
+ yyv14 := &x.RollbackTo
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
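+ // Encoder for RollbackConfig: its single revision field is written only when non-zero.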
+func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Revision != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Revision))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("revision"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Revision))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "revision":
+ if r.TryDecodeAsNil() {
+ x.Revision = 0
+ } else {
+ x.Revision = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Revision = 0
+ } else {
+ x.Revision = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
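+ // Encoder for DeploymentStrategy: type and rollingUpdate are written only when set.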
+func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Type != ""
+ yyq2[1] = x.RollingUpdate != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.RollingUpdate == nil {
+ r.EncodeNil()
+ } else {
+ x.RollingUpdate.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RollingUpdate == nil {
+ r.EncodeNil()
+ } else {
+ x.RollingUpdate.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = DeploymentStrategyType(r.DecodeString())
+ }
+ case "rollingUpdate":
+ if r.TryDecodeAsNil() {
+ if x.RollingUpdate != nil {
+ x.RollingUpdate = nil
+ }
+ } else {
+ if x.RollingUpdate == nil {
+ x.RollingUpdate = new(RollingUpdateDeployment)
+ }
+ x.RollingUpdate.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = DeploymentStrategyType(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RollingUpdate != nil {
+ x.RollingUpdate = nil
+ }
+ } else {
+ if x.RollingUpdate == nil {
+ x.RollingUpdate = new(RollingUpdateDeployment)
+ }
+ x.RollingUpdate.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
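+ // DeploymentStrategyType is encoded and decoded as a plain string.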
+func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
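+ // Encoder for RollingUpdateDeployment: maxUnavailable and maxSurge are always
+ // written; with a JSON handle the IntOrString values use their own JSON
+ // marshalling, otherwise the generic fallback encoding is used.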
+func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.MaxUnavailable
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.MaxUnavailable
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.MaxSurge
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxSurge"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.MaxSurge
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy11) {
+ } else if !yym12 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy11)
+ } else {
+ z.EncFallback(yy11)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "maxUnavailable":
+ if r.TryDecodeAsNil() {
+ x.MaxUnavailable = pkg5_intstr.IntOrString{}
+ } else {
+ yyv4 := &x.MaxUnavailable
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "maxSurge":
+ if r.TryDecodeAsNil() {
+ x.MaxSurge = pkg5_intstr.IntOrString{}
+ } else {
+ yyv6 := &x.MaxSurge
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxUnavailable = pkg5_intstr.IntOrString{}
+ } else {
+ yyv9 := &x.MaxUnavailable
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxSurge = pkg5_intstr.IntOrString{}
+ } else {
+ yyv11 := &x.MaxSurge
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv11)
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
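+ // Encoder for DeploymentStatus: observedGeneration, replicas, updatedReplicas,
+ // availableReplicas and unavailableReplicas are written only when non-zero.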
+func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != 0
+ yyq2[1] = x.Replicas != 0
+ yyq2[2] = x.UpdatedReplicas != 0
+ yyq2[3] = x.AvailableReplicas != 0
+ yyq2[4] = x.UnavailableReplicas != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UpdatedReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UpdatedReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.AvailableReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("availableReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.AvailableReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UnavailableReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UnavailableReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "updatedReplicas":
+ if r.TryDecodeAsNil() {
+ x.UpdatedReplicas = 0
+ } else {
+ x.UpdatedReplicas = int32(r.DecodeInt(32))
+ }
+ case "availableReplicas":
+ if r.TryDecodeAsNil() {
+ x.AvailableReplicas = 0
+ } else {
+ x.AvailableReplicas = int32(r.DecodeInt(32))
+ }
+ case "unavailableReplicas":
+ if r.TryDecodeAsNil() {
+ x.UnavailableReplicas = 0
+ } else {
+ x.UnavailableReplicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UpdatedReplicas = 0
+ } else {
+ x.UpdatedReplicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AvailableReplicas = 0
+ } else {
+ x.AvailableReplicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UnavailableReplicas = 0
+ } else {
+ x.UnavailableReplicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
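+ // Encoder for DeploymentList: metadata and items are always written; kind and
+ // apiVersion only when non-empty. Items round-trips through the generated
+ // []Deployment slice helpers.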
+func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceDeployment(([]Deployment)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceDeployment(([]Deployment)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceDeployment((*[]Deployment)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceDeployment((*[]Deployment)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
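+ // DaemonSetSpec is encoded with an optional "selector" (written only when
+ // non-nil) and a mandatory "template"; the decoders below accept either map
+ // or array form and allocate the LabelSelector on demand.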
+func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Selector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Template
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Template
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv6 := &x.Template
+ yyv6.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv10 := &x.Template
+ yyv10.CodecDecodeSelf(d)
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
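+ // DaemonSetStatus always encodes its three int32 counters
+ // (currentNumberScheduled, numberMisscheduled, desiredNumberScheduled);
+ // the decoders reset a field to zero when it arrives as nil.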
+func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentNumberScheduled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentNumberScheduled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NumberMisscheduled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NumberMisscheduled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredNumberScheduled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredNumberScheduled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "currentNumberScheduled":
+ if r.TryDecodeAsNil() {
+ x.CurrentNumberScheduled = 0
+ } else {
+ x.CurrentNumberScheduled = int32(r.DecodeInt(32))
+ }
+ case "numberMisscheduled":
+ if r.TryDecodeAsNil() {
+ x.NumberMisscheduled = 0
+ } else {
+ x.NumberMisscheduled = int32(r.DecodeInt(32))
+ }
+ case "desiredNumberScheduled":
+ if r.TryDecodeAsNil() {
+ x.DesiredNumberScheduled = 0
+ } else {
+ x.DesiredNumberScheduled = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentNumberScheduled = 0
+ } else {
+ x.CurrentNumberScheduled = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NumberMisscheduled = 0
+ } else {
+ x.NumberMisscheduled = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredNumberScheduled = 0
+ } else {
+ x.DesiredNumberScheduled = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
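+ // DaemonSet encodes metadata, spec and status unconditionally and adds
+ // "kind"/"apiVersion" only when they are non-empty; decoding mirrors this
+ // through the map/array helpers below.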
+func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = DaemonSetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = DaemonSetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = DaemonSetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = DaemonSetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
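+ // DaemonSetList follows the usual list layout: ListMeta under "metadata",
+ // the DaemonSet slice under "items", plus optional "kind"/"apiVersion".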
+func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceDaemonSet(([]DaemonSet)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceDaemonSet(([]DaemonSet)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceDaemonSet((*[]DaemonSet)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceDaemonSet((*[]DaemonSet)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
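+ // ThirdPartyResourceDataList uses the same list layout, delegating the
+ // items slice to the generated ThirdPartyResourceData slice helpers.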
+func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
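+ // Ingress mirrors the DaemonSet layout: metadata, spec and status are
+ // always written, "kind" and "apiVersion" only when set.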
+func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = IngressSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = IngressStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = IngressSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = IngressStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
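+ // IngressList: ListMeta plus an Ingress slice, with the standard optional
+ // type metadata fields.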
+func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceIngress(([]Ingress)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceIngress(([]Ingress)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceIngress((*[]Ingress)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceIngress((*[]Ingress)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
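+ // IngressSpec encodes "backend", "tls" and "rules" only when present
+ // (non-nil backend, non-empty slices); the decoders allocate the backend
+ // lazily and hand the slices to the generated slice helpers.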
+func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Backend != nil
+ yyq2[1] = len(x.TLS) != 0
+ yyq2[2] = len(x.Rules) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Backend == nil {
+ r.EncodeNil()
+ } else {
+ x.Backend.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("backend"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Backend == nil {
+ r.EncodeNil()
+ } else {
+ x.Backend.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.TLS == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tls"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TLS == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceIngressRule(([]IngressRule)(x.Rules), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rules"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSliceIngressRule(([]IngressRule)(x.Rules), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "backend":
+ if r.TryDecodeAsNil() {
+ if x.Backend != nil {
+ x.Backend = nil
+ }
+ } else {
+ if x.Backend == nil {
+ x.Backend = new(IngressBackend)
+ }
+ x.Backend.CodecDecodeSelf(d)
+ }
+ case "tls":
+ if r.TryDecodeAsNil() {
+ x.TLS = nil
+ } else {
+ yyv5 := &x.TLS
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d)
+ }
+ }
+ case "rules":
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv7 := &x.Rules
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceIngressRule((*[]IngressRule)(yyv7), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Backend != nil {
+ x.Backend = nil
+ }
+ } else {
+ if x.Backend == nil {
+ x.Backend = new(IngressBackend)
+ }
+ x.Backend.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TLS = nil
+ } else {
+ yyv11 := &x.TLS
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv13 := &x.Rules
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceIngressRule((*[]IngressRule)(yyv13), d)
+ }
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
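+// IngressTLS: generated codec helpers. Encoding emits the optional "hosts" and "secretName"
+// fields (skipped when empty); decoding accepts either a map or an array container and routes
+// unknown keys to DecStructFieldNotFound.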
+func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Hosts) != 0
+ yyq2[1] = x.SecretName != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Hosts == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Hosts, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hosts"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Hosts == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Hosts, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hosts":
+ if r.TryDecodeAsNil() {
+ x.Hosts = nil
+ } else {
+ yyv4 := &x.Hosts
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "secretName":
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hosts = nil
+ } else {
+ yyv8 := &x.Hosts
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
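+// IngressStatus: generated codec helpers for the single "loadBalancer" field, which delegates
+// to the embedded LoadBalancerStatus codec methods.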
+func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.LoadBalancer
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancer"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.LoadBalancer
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "loadBalancer":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = pkg2_api.LoadBalancerStatus{}
+ } else {
+ yyv4 := &x.LoadBalancer
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = pkg2_api.LoadBalancerStatus{}
+ } else {
+ yyv6 := &x.LoadBalancer
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
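+// IngressRule: generated codec helpers. "host" is written only when non-empty; the "http" value
+// comes from the embedded IngressRuleValue and is encoded as nil when that pointer is unset.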
+func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Host != ""
+ yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("host"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ }
+ }
+ var yyn6 bool
+ if x.IngressRuleValue.HTTP == nil {
+ yyn6 = true
+ goto LABEL6
+ }
+ LABEL6:
+ if yyr2 || yy2arr2 {
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("http"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "host":
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ case "http":
+ if x.IngressRuleValue.HTTP == nil {
+ x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ if x.IngressRuleValue.HTTP == nil {
+ x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
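+// IngressRuleValue: generated codec helpers for the optional "http" pointer field.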
+func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.HTTP != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("http"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "http":
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
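+// HTTPIngressRuleValue: generated codec helpers. The required "paths" slice is handled by the
+// encSliceHTTPIngressPath / decSliceHTTPIngressPath helpers.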
+func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Paths == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("paths"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Paths == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "paths":
+ if r.TryDecodeAsNil() {
+ x.Paths = nil
+ } else {
+ yyv4 := &x.Paths
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Paths = nil
+ } else {
+ yyv7 := &x.Paths
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
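+// HTTPIngressPath: generated codec helpers. "path" is written only when non-empty; "backend" is
+// always encoded via IngressBackend's own codec methods.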
+func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Backend
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("backend"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Backend
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "backend":
+ if r.TryDecodeAsNil() {
+ x.Backend = IngressBackend{}
+ } else {
+ yyv5 := &x.Backend
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Backend = IngressBackend{}
+ } else {
+ yyv8 := &x.Backend
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
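+// IngressBackend: generated codec helpers. Both "serviceName" and "servicePort" are mandatory;
+// servicePort is an intstr.IntOrString and uses its JSON marshaller on JSON handles, otherwise
+// the generic fallback codec.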
+func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.ServicePort
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("servicePort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.ServicePort
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "serviceName":
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ case "servicePort":
+ if r.TryDecodeAsNil() {
+ x.ServicePort = pkg5_intstr.IntOrString{}
+ } else {
+ yyv5 := &x.ServicePort
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServicePort = pkg5_intstr.IntOrString{}
+ } else {
+ yyv9 := &x.ServicePort
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
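+// ReplicaSet: generated codec helpers for metadata, spec, status, kind and apiVersion;
+// kind/apiVersion are omitted from the map form when empty.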
+func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicaSetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicaSetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicaSetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicaSetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
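+// ReplicaSetList: generated codec helpers. "items" is mandatory and handled by the
+// encSliceReplicaSet / decSliceReplicaSet helpers; ListMeta goes through the generic fallback codec.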
+func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceReplicaSet((*[]ReplicaSet)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceReplicaSet((*[]ReplicaSet)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
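+// ReplicaSetSpec: generated codec helpers. "replicas" and "template" are always written;
+// "selector" (a *LabelSelector) is written only when non-nil and decoded via the generic fallback.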
+func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.Selector != nil
+ yyq2[2] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.Template
+ yy10.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.Template
+ yy12.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv7 := &x.Template
+ yyv7.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_api.PodTemplateSpec{}
+ } else {
+ yyv12 := &x.Template
+ yyv12.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
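+// ReplicaSetStatus: generated codec helpers. "replicas" is mandatory; "fullyLabeledReplicas" and
+// "observedGeneration" are omitted from the map form when zero.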
+func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FullyLabeledReplicas != 0
+ yyq2[2] = x.ObservedGeneration != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "fullyLabeledReplicas":
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSecurityPolicySpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSecurityPolicySpec{}
+ } else {
+ yyv10 := &x.Spec
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Privileged != false
+ yyq2[1] = len(x.DefaultAddCapabilities) != 0
+ yyq2[2] = len(x.RequiredDropCapabilities) != 0
+ yyq2[3] = len(x.AllowedCapabilities) != 0
+ yyq2[4] = len(x.Volumes) != 0
+ yyq2[5] = x.HostNetwork != false
+ yyq2[6] = len(x.HostPorts) != 0
+ yyq2[7] = x.HostPID != false
+ yyq2[8] = x.HostIPC != false
+ yyq2[13] = x.ReadOnlyRootFilesystem != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Privileged))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("privileged"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Privileged))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.DefaultAddCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DefaultAddCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.RequiredDropCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDropCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.AllowedCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AllowedCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encSliceFSType(([]FSType)(x.Volumes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encSliceFSType(([]FSType)(x.Volumes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostNetwork"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.HostPorts == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPorts"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HostPorts == nil {
+ r.EncodeNil()
+ } else {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIPC"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy31 := &x.SELinux
+ yy31.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinux"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy33 := &x.SELinux
+ yy33.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy36 := &x.RunAsUser
+ yy36.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsUser"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy38 := &x.RunAsUser
+ yy38.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy41 := &x.SupplementalGroups
+ yy41.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy43 := &x.SupplementalGroups
+ yy43.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy46 := &x.FSGroup
+ yy46.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsGroup"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy48 := &x.FSGroup
+ yy48.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ yym51 := z.EncBinary()
+ _ = yym51
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnlyRootFilesystem))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnlyRootFilesystem))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "privileged":
+ if r.TryDecodeAsNil() {
+ x.Privileged = false
+ } else {
+ x.Privileged = bool(r.DecodeBool())
+ }
+ case "defaultAddCapabilities":
+ if r.TryDecodeAsNil() {
+ x.DefaultAddCapabilities = nil
+ } else {
+ yyv5 := &x.DefaultAddCapabilities
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv5), d)
+ }
+ }
+ case "requiredDropCapabilities":
+ if r.TryDecodeAsNil() {
+ x.RequiredDropCapabilities = nil
+ } else {
+ yyv7 := &x.RequiredDropCapabilities
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv7), d)
+ }
+ }
+ case "allowedCapabilities":
+ if r.TryDecodeAsNil() {
+ x.AllowedCapabilities = nil
+ } else {
+ yyv9 := &x.AllowedCapabilities
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv9), d)
+ }
+ }
+ case "volumes":
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv11 := &x.Volumes
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceFSType((*[]FSType)(yyv11), d)
+ }
+ }
+ case "hostNetwork":
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ case "hostPorts":
+ if r.TryDecodeAsNil() {
+ x.HostPorts = nil
+ } else {
+ yyv14 := &x.HostPorts
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decSliceHostPortRange((*[]HostPortRange)(yyv14), d)
+ }
+ }
+ case "hostPID":
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ case "hostIPC":
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ case "seLinux":
+ if r.TryDecodeAsNil() {
+ x.SELinux = SELinuxStrategyOptions{}
+ } else {
+ yyv18 := &x.SELinux
+ yyv18.CodecDecodeSelf(d)
+ }
+ case "runAsUser":
+ if r.TryDecodeAsNil() {
+ x.RunAsUser = RunAsUserStrategyOptions{}
+ } else {
+ yyv19 := &x.RunAsUser
+ yyv19.CodecDecodeSelf(d)
+ }
+ case "supplementalGroups":
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = SupplementalGroupsStrategyOptions{}
+ } else {
+ yyv20 := &x.SupplementalGroups
+ yyv20.CodecDecodeSelf(d)
+ }
+ case "fsGroup":
+ if r.TryDecodeAsNil() {
+ x.FSGroup = FSGroupStrategyOptions{}
+ } else {
+ yyv21 := &x.FSGroup
+ yyv21.CodecDecodeSelf(d)
+ }
+ case "readOnlyRootFilesystem":
+ if r.TryDecodeAsNil() {
+ x.ReadOnlyRootFilesystem = false
+ } else {
+ x.ReadOnlyRootFilesystem = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj23 int
+ var yyb23 bool
+ var yyhl23 bool = l >= 0
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Privileged = false
+ } else {
+ x.Privileged = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DefaultAddCapabilities = nil
+ } else {
+ yyv25 := &x.DefaultAddCapabilities
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv25), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RequiredDropCapabilities = nil
+ } else {
+ yyv27 := &x.RequiredDropCapabilities
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv27), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AllowedCapabilities = nil
+ } else {
+ yyv29 := &x.AllowedCapabilities
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv29), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv31 := &x.Volumes
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ h.decSliceFSType((*[]FSType)(yyv31), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPorts = nil
+ } else {
+ yyv34 := &x.HostPorts
+ yym35 := z.DecBinary()
+ _ = yym35
+ if false {
+ } else {
+ h.decSliceHostPortRange((*[]HostPortRange)(yyv34), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SELinux = SELinuxStrategyOptions{}
+ } else {
+ yyv38 := &x.SELinux
+ yyv38.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RunAsUser = RunAsUserStrategyOptions{}
+ } else {
+ yyv39 := &x.RunAsUser
+ yyv39.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = SupplementalGroupsStrategyOptions{}
+ } else {
+ yyv40 := &x.SupplementalGroups
+ yyv40.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSGroup = FSGroupStrategyOptions{}
+ } else {
+ yyv41 := &x.FSGroup
+ yyv41.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnlyRootFilesystem = false
+ } else {
+ x.ReadOnlyRootFilesystem = bool(r.DecodeBool())
+ }
+ for {
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj23-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("min"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("max"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "min":
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ case "max":
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int(r.DecodeInt(codecSelferBitsize1234))
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.SELinuxOptions != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SELinuxStrategy(r.DecodeString())
+ }
+ case "seLinuxOptions":
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(pkg2_api.SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SELinuxStrategy(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(pkg2_api.SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.Ranges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ranges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = RunAsUserStrategy(r.DecodeString())
+ }
+ case "ranges":
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv5 := &x.Ranges
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = RunAsUserStrategy(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv9 := &x.Ranges
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("min"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("max"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "min":
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int64(r.DecodeInt(64))
+ }
+ case "max":
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int64(r.DecodeInt(64))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Rule != ""
+ yyq2[1] = len(x.Ranges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ranges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = FSGroupStrategyType(r.DecodeString())
+ }
+ case "ranges":
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv5 := &x.Ranges
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = FSGroupStrategyType(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv9 := &x.Ranges
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
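+// SupplementalGroupsStrategyOptions encodes like FSGroupStrategyOptions above: yyq2 tracks
+// which optional fields ("rule", "ranges") are set, and the struct is written either as a
+// fixed two-element array (StructToArray handles) or as a map holding only the set fields.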
+func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Rule != ""
+ yyq2[1] = len(x.Ranges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ranges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SupplementalGroupsStrategyType(r.DecodeString())
+ }
+ case "ranges":
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv5 := &x.Ranges
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SupplementalGroupsStrategyType(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv9 := &x.Ranges
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
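+// PodSecurityPolicyList writes "metadata" through the generic fallback encoder, always
+// writes "items" via the generated slice helper, and emits "kind"/"apiVersion" in map form
+// only when they are non-empty.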
+func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
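+// NetworkPolicy delegates "metadata" and "spec" to those types' own generated
+// CodecEncodeSelf methods; "kind" and "apiVersion" are emitted only when non-empty.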
+func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = NetworkPolicySpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_api.ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = NetworkPolicySpec{}
+ } else {
+ yyv10 := &x.Spec
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
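+// NetworkPolicySpec always writes "podSelector" (falling back to the generic
+// LabelSelector encoder) and writes "ingress" only when the slice is non-empty.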
+func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.Ingress) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.PodSelector
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.PodSelector
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ingress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "podSelector":
+ if r.TryDecodeAsNil() {
+ x.PodSelector = pkg1_unversioned.LabelSelector{}
+ } else {
+ yyv4 := &x.PodSelector
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "ingress":
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv6 := &x.Ingress
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodSelector = pkg1_unversioned.LabelSelector{}
+ } else {
+ yyv9 := &x.PodSelector
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv11 := &x.Ingress
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
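+// NetworkPolicyIngressRule treats "ports" and "from" as optional slices: each goes through
+// its generated slice helper and is omitted from the map form when empty.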
+func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Ports) != 0
+ yyq2[1] = len(x.From) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.From == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("from"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.From == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv4 := &x.Ports
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d)
+ }
+ }
+ case "from":
+ if r.TryDecodeAsNil() {
+ x.From = nil
+ } else {
+ yyv6 := &x.From
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv9 := &x.Ports
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.From = nil
+ } else {
+ yyv11 := &x.From
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
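+// NetworkPolicyPort has two optional pointer fields: "protocol" encodes through the field
+// type's own CodecEncodeSelf, while "port" (an intstr.IntOrString) uses the JSON
+// marshaler on non-binary JSON handles and the generic fallback otherwise.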
+func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Protocol != nil
+ yyq2[1] = x.Port != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Protocol == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Protocol
+ yysf5 := &yy4
+ yysf5.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Protocol == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Protocol
+ yysf7 := &yy6
+ yysf7.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Port == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Port) {
+ } else if !yym9 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.Port)
+ } else {
+ z.EncFallback(x.Port)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Port == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Port) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.Port)
+ } else {
+ z.EncFallback(x.Port)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ if x.Protocol != nil {
+ x.Protocol = nil
+ }
+ } else {
+ if x.Protocol == nil {
+ x.Protocol = new(pkg2_api.Protocol)
+ }
+ x.Protocol.CodecDecodeSelf(d)
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ if x.Port != nil {
+ x.Port = nil
+ }
+ } else {
+ if x.Port == nil {
+ x.Port = new(pkg5_intstr.IntOrString)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Port) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.Port)
+ } else {
+ z.DecFallback(x.Port, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Protocol != nil {
+ x.Protocol = nil
+ }
+ } else {
+ if x.Protocol == nil {
+ x.Protocol = new(pkg2_api.Protocol)
+ }
+ x.Protocol.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Port != nil {
+ x.Port = nil
+ }
+ } else {
+ if x.Port == nil {
+ x.Port = new(pkg5_intstr.IntOrString)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Port) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.Port)
+ } else {
+ z.DecFallback(x.Port, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
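+// NetworkPolicyPeer carries two optional *LabelSelector fields; both encode via the
+// generic fallback and are allocated lazily on decode when the input value is non-nil.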
+func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.PodSelector != nil
+ yyq2[1] = x.NamespaceSelector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.PodSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.PodSelector) {
+ } else {
+ z.EncFallback(x.PodSelector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PodSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.PodSelector) {
+ } else {
+ z.EncFallback(x.PodSelector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.NamespaceSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) {
+ } else {
+ z.EncFallback(x.NamespaceSelector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NamespaceSelector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) {
+ } else {
+ z.EncFallback(x.NamespaceSelector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "podSelector":
+ if r.TryDecodeAsNil() {
+ if x.PodSelector != nil {
+ x.PodSelector = nil
+ }
+ } else {
+ if x.PodSelector == nil {
+ x.PodSelector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.PodSelector) {
+ } else {
+ z.DecFallback(x.PodSelector, false)
+ }
+ }
+ case "namespaceSelector":
+ if r.TryDecodeAsNil() {
+ if x.NamespaceSelector != nil {
+ x.NamespaceSelector = nil
+ }
+ } else {
+ if x.NamespaceSelector == nil {
+ x.NamespaceSelector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) {
+ } else {
+ z.DecFallback(x.NamespaceSelector, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PodSelector != nil {
+ x.PodSelector = nil
+ }
+ } else {
+ if x.PodSelector == nil {
+ x.PodSelector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.PodSelector) {
+ } else {
+ z.DecFallback(x.PodSelector, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NamespaceSelector != nil {
+ x.NamespaceSelector = nil
+ }
+ } else {
+ if x.NamespaceSelector == nil {
+ x.NamespaceSelector = new(pkg1_unversioned.LabelSelector)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) {
+ } else {
+ z.DecFallback(x.NamespaceSelector, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
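+// NetworkPolicyList follows the same layout as PodSecurityPolicyList above, with its
+// "items" field encoded through encSliceNetworkPolicy.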
+func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
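+// The remaining functions are per-element-type slice codecs. Encoding writes an array of
+// the elements' own encodings; decoding reuses the existing backing array where it can,
+// sizes fresh allocations with z.DecInferLen (bounded by MaxInitLen), and, for streams of
+// unknown length, appends elements until r.CheckBreak signals the end of the container.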
+func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CustomMetricTarget{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CustomMetricTarget, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CustomMetricTarget, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricTarget{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CustomMetricTarget{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricTarget{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricTarget{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CustomMetricTarget{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CustomMetricCurrentStatus{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CustomMetricCurrentStatus, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CustomMetricCurrentStatus, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricCurrentStatus{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CustomMetricCurrentStatus{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricCurrentStatus{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricCurrentStatus{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CustomMetricCurrentStatus{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
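Every generated decSliceXxx helper in this file follows the same reuse-or-reallocate pattern before filling elements: keep the caller's backing array when the incoming length fits its capacity, otherwise allocate a fresh slice sized via DecInferLen, and fall back to a streaming loop (CheckBreak / DecSwallow) when the length is unknown. The following minimal sketch (a hypothetical resizeForDecode helper, not part of the generated file) illustrates just the resizing step:

// resizeForDecode mirrors, in simplified form, what the generated
// decSliceXxx helpers do before decoding elements in place: reuse the
// destination's backing array when it is large enough, otherwise allocate.
// Illustration only; the generated code additionally handles streamed,
// unknown-length input via CheckBreak and DecSwallow.
package main

import "fmt"

func resizeForDecode(dst []int, n int) ([]int, bool) {
	if n <= cap(dst) {
		return dst[:n], n != len(dst) // reuse backing array; report length change
	}
	return make([]int, n), true // over capacity: allocate a fresh slice
}

func main() {
	buf := make([]int, 2, 8)
	buf, changed := resizeForDecode(buf, 5)
	fmt.Println(len(buf), cap(buf), changed) // 5 8 true
}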
+func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []APIVersion{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]APIVersion, yyrl1)
+ }
+ } else {
+ yyv1 = make([]APIVersion, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = APIVersion{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, APIVersion{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = APIVersion{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = APIVersion{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []APIVersion{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ThirdPartyResource{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ThirdPartyResource, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ThirdPartyResource, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResource{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ThirdPartyResource{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResource{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResource{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ThirdPartyResource{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Deployment{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Deployment, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Deployment, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Deployment{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Deployment{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Deployment{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Deployment{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Deployment{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []DaemonSet{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]DaemonSet, yyrl1)
+ }
+ } else {
+ yyv1 = make([]DaemonSet, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DaemonSet{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, DaemonSet{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DaemonSet{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DaemonSet{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []DaemonSet{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ThirdPartyResourceData{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ThirdPartyResourceData, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ThirdPartyResourceData, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResourceData{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ThirdPartyResourceData{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResourceData{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResourceData{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ThirdPartyResourceData{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Ingress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Ingress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Ingress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Ingress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Ingress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Ingress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Ingress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Ingress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []IngressTLS{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]IngressTLS, yyrl1)
+ }
+ } else {
+ yyv1 = make([]IngressTLS, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressTLS{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, IngressTLS{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressTLS{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressTLS{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []IngressTLS{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []IngressRule{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]IngressRule, yyrl1)
+ }
+ } else {
+ yyv1 = make([]IngressRule, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressRule{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, IngressRule{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressRule{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressRule{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []IngressRule{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HTTPIngressPath{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HTTPIngressPath, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HTTPIngressPath, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPIngressPath{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HTTPIngressPath{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPIngressPath{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPIngressPath{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HTTPIngressPath{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ReplicaSet{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 704)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ReplicaSet, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ReplicaSet, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicaSet{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ReplicaSet{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicaSet{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicaSet{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ReplicaSet{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yysf2 := &yyv1
+ yysf2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg2_api.Capability{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg2_api.Capability, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg2_api.Capability, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = pkg2_api.Capability(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = pkg2_api.Capability(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 pkg2_api.Capability
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = pkg2_api.Capability(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg2_api.Capability{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []FSType{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]FSType, yyrl1)
+ }
+ } else {
+ yyv1 = make([]FSType, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FSType(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FSType(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 FSType
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FSType(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []FSType{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HostPortRange{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HostPortRange, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HostPortRange, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HostPortRange{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HostPortRange{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HostPortRange{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HostPortRange{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HostPortRange{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []IDRange{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]IDRange, yyrl1)
+ }
+ } else {
+ yyv1 = make([]IDRange, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IDRange{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, IDRange{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IDRange{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IDRange{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []IDRange{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodSecurityPolicy{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 536)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodSecurityPolicy, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodSecurityPolicy, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodSecurityPolicy{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodSecurityPolicy{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodSecurityPolicy{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodSecurityPolicy{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodSecurityPolicy{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicyIngressRule{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicyIngressRule, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicyIngressRule, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyIngressRule{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicyIngressRule{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyIngressRule{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyIngressRule{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicyIngressRule{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicyPort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicyPort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicyPort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicyPort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicyPort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicyPeer{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicyPeer, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicyPeer, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPeer{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicyPeer{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPeer{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPeer{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicyPeer{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicy{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicy, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicy, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicy{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicy{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicy{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicy{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicy{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
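The generated CodecEncodeSelf / CodecDecodeSelf methods and the slice helpers above are not called directly; an ugorji codec Encoder or Decoder detects types that implement codec.Selfer and dispatches to them. A minimal usage sketch follows; it is not part of the vendored sources, and the item type is a hypothetical stand-in.

// Round-trip a slice through github.com/ugorji/go/codec. The same
// Encode/Decode calls drive the generated code above: when a value's type
// implements codec.Selfer (CodecEncodeSelf / CodecDecodeSelf), the encoder
// and decoder call those methods; for plain types like the hypothetical
// item below they fall back to reflection.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type item struct {
	Name string `json:"name"`
}

func main() {
	h := new(codec.JsonHandle)

	in := []item{{Name: "a"}, {Name: "b"}}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, h).Encode(in); err != nil {
		panic(err)
	}

	var out []item
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %+v\n", buf, out)
}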
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go
new file mode 100644
index 0000000..8c73db7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go
@@ -0,0 +1,901 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file (together with pkg/apis/extensions/v1beta1/types.go) contains the experimental
+types in kubernetes. These API objects are experimental, meaning that the
+APIs may be broken at any time by the kubernetes team.
+
+DISCLAIMER: The implementation of the experimental API group itself is
+a temporary one meant as a stopgap solution until kubernetes has proper
+support for multiple API groups. The transition may require changes
+beyond registration differences. In other words, experimental API group
+support is experimental.
+*/
+
+package extensions
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+// describes the attributes of a scale subresource
+type ScaleSpec struct {
+ // desired number of instances for the scaled object.
+ Replicas int32 `json:"replicas,omitempty"`
+}
+
+// represents the current status of a scale subresource.
+type ScaleStatus struct {
+ // actual number of observed instances of the scaled object.
+ Replicas int32 `json:"replicas"`
+
+ // label query over pods that should match the replicas count.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+}
+
+// +genclient=true
+// +noMethods=true
+
+// represents a scaling request for a resource.
+type Scale struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec ScaleSpec `json:"spec,omitempty"`
+
+ // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ Status ScaleStatus `json:"status,omitempty"`
+}
+
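As a rough illustration of the scale subresource defined above, the sketch below fills in Spec.Replicas on a Scale value. It is a sketch only: "frontend" and "default" are placeholder names, and the client machinery that would actually submit the subresource is out of scope here.

// Build a Scale object asking for 3 replicas of a scaled resource.
// Assumes this vendored extensions package and the sibling api package.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
	s := extensions.Scale{
		ObjectMeta: api.ObjectMeta{Name: "frontend", Namespace: "default"},
		Spec:       extensions.ScaleSpec{Replicas: 3},
	}
	fmt.Printf("scale %s/%s to %d replicas\n", s.Namespace, s.Name, s.Spec.Replicas)
}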
+// Dummy definition
+type ReplicationControllerDummy struct {
+ unversioned.TypeMeta `json:",inline"`
+}
+
+// Alpha-level support for Custom Metrics in HPA (as annotations).
+type CustomMetricTarget struct {
+ // Custom Metric name.
+ Name string `json:"name"`
+ // Custom Metric value (average).
+ TargetValue resource.Quantity `json:"value"`
+}
+
+type CustomMetricTargetList struct {
+ Items []CustomMetricTarget `json:"items"`
+}
+
+type CustomMetricCurrentStatus struct {
+ // Custom Metric name.
+ Name string `json:"name"`
+ // Custom Metric value (average).
+ CurrentValue resource.Quantity `json:"value"`
+}
+
+type CustomMetricCurrentStatusList struct {
+ Items []CustomMetricCurrentStatus `json:"items"`
+}
+
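Since these custom-metric types travel as annotations in their alpha form, a target list ends up serialized to JSON. The sketch below shows one plausible serialization, assuming the vendored resource.MustParse helper; the annotation key used by the autoscaler is not defined in this file and is deliberately left out.

// Marshal a CustomMetricTargetList the way an alpha HPA annotation
// would carry it. Sketch only: "qps" is a placeholder metric name.
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
	targets := extensions.CustomMetricTargetList{
		Items: []extensions.CustomMetricTarget{
			{Name: "qps", TargetValue: resource.MustParse("100")},
		},
	}
	b, err := json.Marshal(targets)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // roughly: {"items":[{"name":"qps","value":"100"}]}
}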
+// +genclient=true
+// +nonNamespaced=true
+
+// A ThirdPartyResource is a generic representation of a resource; it is used by add-ons and plugins to add new resource
+// types to the API. It consists of one or more Versions of the API.
+type ThirdPartyResource struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Standard object metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Description is the description of this object.
+ Description string `json:"description,omitempty"`
+
+ // Versions are versions for this third party object
+ Versions []APIVersion `json:"versions,omitempty"`
+}
+
+type ThirdPartyResourceList struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Standard list metadata.
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of horizontal pod autoscalers.
+ Items []ThirdPartyResource `json:"items"`
+}
+
+// An APIVersion represents a single concrete version of an object model.
+// TODO: we should consider merging this struct with GroupVersion in unversioned.go
+type APIVersion struct {
+ // Name of this version (e.g. 'v1').
+ Name string `json:"name,omitempty"`
+}
+
+// An internal object, used for versioned storage in etcd. Not exposed to the end user.
+type ThirdPartyResourceData struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Data is the raw JSON data for this object.
+ Data []byte `json:"data,omitempty"`
+}
+
+// +genclient=true
+
+type Deployment struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Specification of the desired behavior of the Deployment.
+ Spec DeploymentSpec `json:"spec,omitempty"`
+
+ // Most recently observed status of the Deployment.
+ Status DeploymentStatus `json:"status,omitempty"`
+}
+
+type DeploymentSpec struct {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+
+ // Template describes the pods that will be created.
+ Template api.PodTemplateSpec `json:"template"`
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ Strategy DeploymentStrategy `json:"strategy,omitempty"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+
+ // Indicates that the deployment is paused and will not be processed by the
+ // deployment controller.
+ Paused bool `json:"paused,omitempty"`
+ // The config this deployment is rolling back to. Will be cleared after rollback is done.
+ RollbackTo *RollbackConfig `json:"rollbackTo,omitempty"`
+}
+
+// DeploymentRollback stores the information required to rollback a deployment.
+type DeploymentRollback struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Required: This must match the Name of a deployment.
+ Name string `json:"name"`
+ // The annotations to be updated to a deployment
+ UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty"`
+ // The config of this deployment rollback.
+ RollbackTo RollbackConfig `json:"rollbackTo"`
+}
+
+type RollbackConfig struct {
+ // The revision to roll back to. If set to 0, roll back to the last revision.
+ Revision int64 `json:"revision,omitempty"`
+}
+
+const (
+ // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+ // to existing RCs (and the label key that is added to its pods) to prevent the existing RCs
+ // from selecting new pods (and old pods from being selected by the new RC).
+ DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+type DeploymentStrategy struct {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ Type DeploymentStrategyType `json:"type,omitempty"`
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty"`
+}
+
+type DeploymentStrategyType string
+
+const (
+ // Kill all existing pods before creating new ones.
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+ // Replace the old RCs with a new one using a rolling update, i.e. gradually scale down the old RCs and scale up the new one.
+ RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up.
+ // This can not be 0 if MaxSurge is 0.
+ // By default, a fixed value of 1 is used.
+ // Example: when this is set to 30%, the old RC can be scaled down by 30%
+ // immediately when the rolling update starts. Once new pods are ready, old RC
+ // can be scaled down further, followed by scaling up the new RC, ensuring
+ // that at least 70% of original number of pods are available at all times
+ // during the update.
+ MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"`
+
+ // The maximum number of pods that can be scheduled above the original number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of total pods at
+ // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // By default, a value of 1 is used.
+ // Example: when this is set to 30%, the new RC can be scaled up by 30%
+ // immediately when the rolling update starts. Once old pods have been killed,
+ // new RC can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of the original pods.
+ MaxSurge intstr.IntOrString `json:"maxSurge,omitempty"`
+}
+
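Both comments above state that an absolute count derived from a percentage is rounded up; the short sketch below spells out that arithmetic (a hand-rolled helper, not the controller's own IntOrString resolution).

// Convert a percentage-style maxUnavailable/maxSurge into an absolute
// pod count, rounding up as the comments above describe.
// Illustration only; the deployment controller resolves the actual
// IntOrString values through its own utilities.
package main

import (
	"fmt"
	"math"
)

func absoluteFromPercent(percent, totalPods int) int {
	return int(math.Ceil(float64(percent) / 100.0 * float64(totalPods)))
}

func main() {
	fmt.Println(absoluteFromPercent(30, 10)) // 3: 30% of 10 pods
	fmt.Println(absoluteFromPercent(25, 10)) // 3: 2.5 rounded up to 3
}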
+type DeploymentStatus struct {
+ // The generation observed by the deployment controller.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ AvailableReplicas int32 `json:"availableReplicas,omitempty"`
+
+ // Total number of unavailable pods targeted by this deployment.
+ UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"`
+}
+
+type DeploymentList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of deployments.
+ Items []Deployment `json:"items"`
+}
+
+// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
+/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out.
+type DaemonSetUpdateStrategy struct {
+ // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate.
+ Type DaemonSetUpdateStrategyType `json:"type,omitempty"`
+
+ // Rolling update config params. Present only if DaemonSetUpdateStrategy =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as DeploymentStrategy.RollingUpdate.
+ RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+ // Replace the old daemons by new ones using rolling update, i.e. replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, 30% of the currently running DaemonSet
+ // pods can be stopped for an update at any given time. The update starts
+ // by stopping at most 30% of the currently running DaemonSet pods and then
+ // brings up new DaemonSet pods in their place. Once the new pods are ready,
+ // it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ // 70% of the original number of DaemonSet pods are available at all times
+ // during the update.
+ MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"`
+
+ // Minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its containers crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ MinReadySeconds int `json:"minReadySeconds,omitempty"`
+}
+*/
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+ // Selector is a label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on Pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+
+ // Template is the object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ Template api.PodTemplateSpec `json:"template"`
+
+ // TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
+ /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out.
+ // Update strategy to replace existing DaemonSet pods with new pods.
+ UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"`
+
+ // Label key that is added to DaemonSet pods to distinguish between old and
+ // new pod templates during DaemonSet update.
+ // Users can set this to an empty string to indicate that the system should
+ // not add any label. If unspecified, system uses
+ // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash").
+ // Value of this key is hash of DaemonSetSpec.PodTemplateSpec.
+ // No label is added if this is set to empty string.
+ UniqueLabelKey string `json:"uniqueLabelKey,omitempty"`
+ */
+}
+
+const (
+ // DefaultDaemonSetUniqueLabelKey is the default label key that is added
+ // to daemon set pods to distinguish between old and new pod templates during
+ // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information.
+ DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash"
+)
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+ // CurrentNumberScheduled is the number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ CurrentNumberScheduled int32 `json:"currentNumberScheduled"`
+
+ // NumberMisscheduled is the number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ NumberMisscheduled int32 `json:"numberMisscheduled"`
+
+ // DesiredNumberScheduled is the total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ DesiredNumberScheduled int32 `json:"desiredNumberScheduled"`
+}
+
+// +genclient=true
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired behavior of this daemon set.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec DaemonSetSpec `json:"spec,omitempty"`
+
+ // Status is the current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status DaemonSetStatus `json:"status,omitempty"`
+}
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is a list of daemon sets.
+ Items []DaemonSet `json:"items"`
+}
+
+type ThirdPartyResourceDataList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+ // Items is a list of third party objects
+ Items []ThirdPartyResourceData `json:"items"`
+}
+
+// +genclient=true
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable URLs, load balance traffic, terminate SSL, offer
+// name-based virtual hosting, etc.
+type Ingress struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is the desired state of the Ingress.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec IngressSpec `json:"spec,omitempty"`
+
+ // Status is the current state of the Ingress.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status IngressStatus `json:"status,omitempty"`
+}
+
+// IngressList is a collection of Ingress.
+type IngressList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of Ingress.
+ Items []Ingress `json:"items"`
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+type IngressSpec struct {
+ // A default backend capable of servicing requests that don't match any
+ // rule. At least one of 'backend' or 'rules' must be specified. This field
+ // is optional to allow the loadbalancer controller or defaulting logic to
+ // specify a global default.
+ Backend *IngressBackend `json:"backend,omitempty"`
+
+ // TLS configuration. Currently the Ingress only supports a single TLS
+ // port, 443. If multiple members of this list specify different hosts, they
+ // will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ TLS []IngressTLS `json:"tls,omitempty"`
+
+ // A list of host rules used to configure the Ingress. If unspecified, or
+ // no rule matches, all traffic is sent to the default backend.
+ Rules []IngressRule `json:"rules,omitempty"`
+ // TODO: Add the ability to specify load-balancer IP through claims
+}
+
+// IngressTLS describes the transport layer security associated with an Ingress.
+type IngressTLS struct {
+ // Hosts are a list of hosts included in the TLS certificate. The values in
+ // this list must match the name(s) used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ Hosts []string `json:"hosts,omitempty"`
+ // SecretName is the name of the secret used to terminate SSL traffic on 443.
+ // Field is left optional to allow SSL routing based on SNI hostname alone.
+ // If the SNI host in a listener conflicts with the "Host" header field used
+ // by an IngressRule, the SNI host is used for termination and value of the
+ // Host header is used for routing.
+ SecretName string `json:"secretName,omitempty"`
+ // TODO: Consider specifying different modes of termination, protocols etc.
+}
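+
+// Illustrative sketch, not part of the upstream API: two TLS entries sharing
+// port 443 and distinguished by SNI, as described in the comments above. The
+// host names and secret names are hypothetical.
+var exampleIngressTLS = []IngressTLS{
+ {Hosts: []string{"foo.example.com"}, SecretName: "foo-tls"},
+ {Hosts: []string{"bar.example.com"}, SecretName: "bar-tls"},
+}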
+
+// IngressStatus describe the current state of the Ingress.
+type IngressStatus struct {
+ // LoadBalancer contains the current status of the load-balancer.
+ LoadBalancer api.LoadBalancerStatus `json:"loadBalancer,omitempty"`
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+type IngressRule struct {
+ // Host is the fully qualified domain name of a network host, as defined
+ // by RFC 3986. Note the following deviations from the "host" part of the
+ // URI as defined in the RFC:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
+ // IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the IngressRuleValue.
+ // If the host is unspecified, the Ingress routes all traffic based on the
+ // specified IngressRuleValue.
+ Host string `json:"host,omitempty"`
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to an HTTP catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. HTTP is
+ // currently the only supported IngressRuleValue.
+ IngressRuleValue `json:",inline,omitempty"`
+}
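+
+// Illustrative sketch, not part of the upstream API, of the matching order
+// described above: the request host is checked first, an empty Host matches all
+// traffic, and only then is the IngressRuleValue consulted. ruleMatchesHost is a
+// hypothetical helper.
+func ruleMatchesHost(rule IngressRule, requestHost string) bool {
+ // An unspecified host means the rule applies to all incoming traffic.
+ if rule.Host == "" {
+  return true
+ }
+ return rule.Host == requestHost
+}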
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+type IngressRuleValue struct {
+ //TODO:
+ // 1. Consider renaming this resource and the associated rules so they
+ // aren't tied to Ingress. They can be used to route intra-cluster traffic.
+ // 2. Consider adding fields for ingress-type specific global options
+ // usable by a loadbalancer, like http keep-alive.
+
+ HTTP *HTTPIngressRuleValue `json:"http,omitempty"`
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example http://<host>/<path>?<searchpart> -> backend, where the
+// parts of the URL correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+type HTTPIngressRuleValue struct {
+ // A collection of paths that map requests to backends.
+ Paths []HTTPIngressPath `json:"paths"`
+ // TODO: Consider adding fields for ingress-type specific global
+ // options usable by a loadbalancer, like http keep-alive.
+}
+
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
+// the path are forwarded to the backend.
+type HTTPIngressPath struct {
+ // Path is an extended POSIX regex as defined by IEEE Std 1003.1
+ // (i.e. it follows the egrep/unix syntax, not the perl syntax),
+ // matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path"
+ // part of a URL as defined by RFC 3986. Paths must begin with
+ // a '/'. If unspecified, the path defaults to a catch all sending
+ // traffic to the backend.
+ Path string `json:"path,omitempty"`
+
+ // Backend defines the referenced service endpoint to which the traffic
+ // will be forwarded.
+ Backend IngressBackend `json:"backend"`
+}
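+
+// Illustrative sketch, not part of the upstream API: a path entry that routes
+// requests under /api to a hypothetical "api-svc" service on port 8080. Leaving
+// Path empty would instead produce the catch-all behavior described above.
+var exampleHTTPPath = HTTPIngressPath{
+ Path: "/api",
+ Backend: IngressBackend{
+  ServiceName: "api-svc",
+  ServicePort: intstr.FromInt(8080),
+ },
+}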
+
+// IngressBackend describes all endpoints for a given service and port.
+type IngressBackend struct {
+ // Specifies the name of the referenced service.
+ ServiceName string `json:"serviceName"`
+
+ // Specifies the port of the referenced service.
+ ServicePort intstr.IntOrString `json:"servicePort"`
+}
+
+// +genclient=true
+
+// ReplicaSet represents the configuration of a replica set.
+type ReplicaSet struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired behavior of this ReplicaSet.
+ Spec ReplicaSetSpec `json:"spec,omitempty"`
+
+ // Status is the current status of this ReplicaSet. This data may be
+ // out of date by some window of time.
+ Status ReplicaSetStatus `json:"status,omitempty"`
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []ReplicaSet `json:"items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+// As the internal representation of a ReplicaSet, it must have
+// a Template set.
+type ReplicaSetSpec struct {
+ // Replicas is the number of desired replicas.
+ Replicas int32 `json:"replicas"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ Template api.PodTemplateSpec `json:"template,omitempty"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+ // Replicas is the number of actual replicas.
+ Replicas int32 `json:"replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
+
+ // ObservedGeneration is the most recent generation observed by the controller.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext
+// that will be applied to a pod and container.
+type PodSecurityPolicy struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the policy enforced.
+ Spec PodSecurityPolicySpec `json:"spec,omitempty"`
+}
+
+// PodSecurityPolicySpec defines the policy enforced.
+type PodSecurityPolicySpec struct {
+ // Privileged determines if a pod can request to be run as privileged.
+ Privileged bool `json:"privileged,omitempty"`
+ // DefaultAddCapabilities is the default set of capabilities that will be added to the container
+ // unless the pod spec specifically drops the capability. You may not list a capability in both
+ // DefaultAddCapabilities and RequiredDropCapabilities.
+ DefaultAddCapabilities []api.Capability `json:"defaultAddCapabilities,omitempty"`
+ // RequiredDropCapabilities are the capabilities that will be dropped from the container. These
+ // are required to be dropped and cannot be added.
+ RequiredDropCapabilities []api.Capability `json:"requiredDropCapabilities,omitempty"`
+ // AllowedCapabilities is a list of capabilities that can be requested to add to the container.
+ // Capabilities in this field may be added at the pod author's discretion.
+ // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
+ AllowedCapabilities []api.Capability `json:"allowedCapabilities,omitempty"`
+ // Volumes is a whitelist of allowed volume plugins. An empty list indicates that all
+ // plugins may be used.
+ Volumes []FSType `json:"volumes,omitempty"`
+ // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+ HostNetwork bool `json:"hostNetwork,omitempty"`
+ // HostPorts determines which host port ranges are allowed to be exposed.
+ HostPorts []HostPortRange `json:"hostPorts,omitempty"`
+ // HostPID determines if the policy allows the use of HostPID in the pod spec.
+ HostPID bool `json:"hostPID,omitempty"`
+ // HostIPC determines if the policy allows the use of HostIPC in the pod spec.
+ HostIPC bool `json:"hostIPC,omitempty"`
+ // SELinux is the strategy that will dictate the allowable labels that may be set.
+ SELinux SELinuxStrategyOptions `json:"seLinux"`
+ // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+ RunAsUser RunAsUserStrategyOptions `json:"runAsUser"`
+ // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+ SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups"`
+ // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+ FSGroup FSGroupStrategyOptions `json:"fsGroup"`
+ // ReadOnlyRootFilesystem, when set to true, forces containers to run with a read-only root
+ // filesystem. If the container specifically requests to run with a non-read-only root
+ // filesystem, the PSP should deny the pod.
+ // If set to false, the container may run with a read-only root filesystem if it wishes, but it
+ // will not be forced to.
+ ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty"`
+}
+
+// HostPortRange defines a range of host ports that will be enabled by a policy
+// for pods to use. It requires both the start and end to be defined.
+type HostPortRange struct {
+ // Min is the start of the range, inclusive.
+ Min int `json:"min"`
+ // Max is the end of the range, inclusive.
+ Max int `json:"max"`
+}
+
+// FSType gives strong typing to different file systems that are used by volumes.
+type FSType string
+
+var (
+ AzureFile FSType = "azureFile"
+ Flocker FSType = "flocker"
+ FlexVolume FSType = "flexVolume"
+ HostPath FSType = "hostPath"
+ EmptyDir FSType = "emptyDir"
+ GCEPersistentDisk FSType = "gcePersistentDisk"
+ AWSElasticBlockStore FSType = "awsElasticBlockStore"
+ GitRepo FSType = "gitRepo"
+ Secret FSType = "secret"
+ NFS FSType = "nfs"
+ ISCSI FSType = "iscsi"
+ Glusterfs FSType = "glusterfs"
+ PersistentVolumeClaim FSType = "persistentVolumeClaim"
+ RBD FSType = "rbd"
+ Cinder FSType = "cinder"
+ CephFS FSType = "cephFS"
+ DownwardAPI FSType = "downwardAPI"
+ FC FSType = "fc"
+ ConfigMap FSType = "configMap"
+ VsphereVolume FSType = "vsphereVolume"
+ All FSType = "*"
+)
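+
+// Illustrative sketch, not part of the upstream API: a minimal PodSecurityPolicySpec
+// that whitelists only a few volume plugins and uses RunAsAny strategies for the
+// required fields documented above. The concrete choices are hypothetical, not a
+// recommended policy.
+var examplePSPSpec = PodSecurityPolicySpec{
+ Privileged:         false,
+ Volumes:            []FSType{ConfigMap, Secret, EmptyDir},
+ SELinux:            SELinuxStrategyOptions{Rule: SELinuxStrategyRunAsAny},
+ RunAsUser:          RunAsUserStrategyOptions{Rule: RunAsUserStrategyRunAsAny},
+ SupplementalGroups: SupplementalGroupsStrategyOptions{Rule: SupplementalGroupsStrategyRunAsAny},
+ FSGroup:            FSGroupStrategyOptions{Rule: FSGroupStrategyRunAsAny},
+}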
+
+// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
+type SELinuxStrategyOptions struct {
+ // Rule is the strategy that will dictate the allowable labels that may be set.
+ Rule SELinuxStrategy `json:"rule"`
+ // seLinuxOptions required to run as; required for MustRunAs
+ // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context
+ SELinuxOptions *api.SELinuxOptions `json:"seLinuxOptions,omitempty"`
+}
+
+// SELinuxStrategy denotes strategy types for generating SELinux options for a
+// SecurityContext.
+type SELinuxStrategy string
+
+const (
+ // container must have SELinux labels of X applied.
+ SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
+ // container may make requests for any SELinux context labels.
+ SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
+)
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+type RunAsUserStrategyOptions struct {
+ // Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+ Rule RunAsUserStrategy `json:"rule"`
+ // Ranges are the allowed ranges of uids that may be used.
+ Ranges []IDRange `json:"ranges,omitempty"`
+}
+
+// IDRange provides a min/max of an allowed range of IDs.
+type IDRange struct {
+ // Min is the start of the range, inclusive.
+ Min int64 `json:"min"`
+ // Max is the end of the range, inclusive.
+ Max int64 `json:"max"`
+}
+
+// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
+// SecurityContext.
+type RunAsUserStrategy string
+
+const (
+ // container must run as a particular uid.
+ RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
+ // container must run as a non-root uid.
+ RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
+ // container may make requests for any uid.
+ RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
+)
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+type FSGroupStrategyOptions struct {
+ // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+ Rule FSGroupStrategyType `json:"rule,omitempty"`
+ // Ranges are the allowed ranges of fs groups. If you would like to force a single
+ // fs group then supply a single range with the same start and end.
+ Ranges []IDRange `json:"ranges,omitempty"`
+}
+
+// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
+// SecurityContext
+type FSGroupStrategyType string
+
+const (
+ // container must have FSGroup of X applied.
+ FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
+ // container may make requests for any FSGroup labels.
+ FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
+)
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+type SupplementalGroupsStrategyOptions struct {
+ // Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+ Rule SupplementalGroupsStrategyType `json:"rule,omitempty"`
+ // Ranges are the allowed ranges of supplemental groups. If you would like to force a single
+ // supplemental group then supply a single range with the same start and end.
+ Ranges []IDRange `json:"ranges,omitempty"`
+}
+
+// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
+// groups for a SecurityContext.
+type SupplementalGroupsStrategyType string
+
+const (
+ // container must run as a particular gid.
+ SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
+ // container may make requests for any gid.
+ SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
+)
+
+// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
+type PodSecurityPolicyList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []PodSecurityPolicy `json:"items"`
+}
+
+type NetworkPolicy struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Specification of the desired behavior for this NetworkPolicy.
+ Spec NetworkPolicySpec `json:"spec,omitempty"`
+}
+
+type NetworkPolicySpec struct {
+ // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules
+ // is applied to any pods selected by this field. Multiple network policies can select the
+ // same set of pods. In this case, the ingress rules for each are combined additively.
+ // This field is NOT optional and follows standard label selector semantics.
+ // An empty podSelector matches all pods in this namespace.
+ PodSelector unversioned.LabelSelector `json:"podSelector"`
+
+ // List of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it,
+ // OR if the traffic source is the pod's local node,
+ // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+ // objects whose podSelector matches the pod.
+ // If this field is empty then this NetworkPolicy does not affect ingress isolation.
+ // If this field is present and contains at least one rule, this policy allows any traffic
+ // which matches at least one of the ingress rules in this list.
+ Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty"`
+}
+
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
+type NetworkPolicyIngressRule struct {
+ // List of ports which should be made accessible on the pods selected for this rule.
+ // Each item in this list is combined using a logical OR.
+ // If this field is not provided, this rule matches all ports (traffic not restricted by port).
+ // If this field is empty, this rule matches no ports (no traffic matches).
+ // If this field is present and contains at least one item, then this rule allows traffic
+ // only if the traffic matches at least one port in the list.
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+ Ports []NetworkPolicyPort `json:"ports,omitempty"`
+
+ // List of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation.
+ // If this field is not provided, this rule matches all sources (traffic not restricted by source).
+ // If this field is empty, this rule matches no sources (no traffic matches).
+ // If this field is present and contains at least one item, this rule allows traffic only if the
+ // traffic matches at least one item in the from list.
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+ From []NetworkPolicyPeer `json:"from,omitempty"`
+}
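+
+// Illustrative sketch, not part of the upstream API: a rule that allows traffic
+// only when it matches at least one listed port AND comes from at least one listed
+// peer, reflecting the "ports AND from" semantics documented above. The port number
+// and the "role: frontend" label are hypothetical.
+var exampleTCP api.Protocol = "TCP"
+var examplePort = intstr.FromInt(6379)
+var exampleIngressAllow = NetworkPolicyIngressRule{
+ Ports: []NetworkPolicyPort{{Protocol: &exampleTCP, Port: &examplePort}},
+ From: []NetworkPolicyPeer{{
+  PodSelector: &unversioned.LabelSelector{MatchLabels: map[string]string{"role": "frontend"}},
+ }},
+}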
+
+type NetworkPolicyPort struct {
+ // Optional. The protocol (TCP or UDP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ Protocol *api.Protocol `json:"protocol,omitempty"`
+
+ // If specified, the port on the given protocol. This can
+ // either be a numerical or named port on a pod. If this field is not provided,
+ // this matches all port names and numbers.
+ // If present, only traffic on the specified protocol AND port
+ // will be matched.
+ Port *intstr.IntOrString `json:"port,omitempty"`
+}
+
+type NetworkPolicyPeer struct {
+ // Exactly one of the following must be specified.
+
+ // This is a label selector which selects Pods in this namespace.
+ // This field follows standard label selector semantics.
+ // If not provided, this selector selects no pods.
+ // If present but empty, this selector selects all pods in this namespace.
+ PodSelector *unversioned.LabelSelector `json:"podSelector,omitempty"`
+
+ // Selects Namespaces using cluster-scoped labels. This
+ // matches all pods in all namespaces selected by this label selector.
+ // This field follows standard label selector semantics.
+ // If omitted, this selector selects no namespaces.
+ // If present but empty, this selector selects all namespaces.
+ NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"`
+}
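+
+// Illustrative sketch, not part of the upstream API: two peers showing the
+// "exactly one of" rule above. The first selects pods by label within the policy's
+// own namespace; the second selects every pod in namespaces carrying a hypothetical
+// "env" label.
+var examplePeers = []NetworkPolicyPeer{
+ {PodSelector: &unversioned.LabelSelector{MatchLabels: map[string]string{"role": "db"}}},
+ {NamespaceSelector: &unversioned.LabelSelector{MatchLabels: map[string]string{"env": "staging"}}},
+}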
+
+// NetworkPolicyList is a list of NetworkPolicy objects.
+type NetworkPolicyList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ Items []NetworkPolicy `json:"items"`
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go
new file mode 100644
index 0000000..d685ebf
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go
@@ -0,0 +1,404 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+ // Add non-generated conversion functions
+ err := scheme.AddConversionFuncs(
+ Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus,
+ Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus,
+ Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec,
+ Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec,
+ Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy,
+ Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy,
+ Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
+ Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
+ Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec,
+ Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec,
+ // autoscaling
+ Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference,
+ Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference,
+ Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec,
+ Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec,
+ // batch
+ Convert_batch_JobSpec_To_v1beta1_JobSpec,
+ Convert_v1beta1_JobSpec_To_batch_JobSpec,
+ )
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+
+ // Add field label conversions for kinds that have no selectable fields other than ObjectMeta fields.
+ for _, kind := range []string{"DaemonSet", "Deployment", "Ingress"} {
+ err = api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind,
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label %q not supported for %q", label, kind)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+ }
+
+ err = api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", "Job",
+ func(label, value string) (string, string, error) {
+ switch label {
+ case "metadata.name", "metadata.namespace", "status.successful":
+ return label, value, nil
+ default:
+ return "", "", fmt.Errorf("field label not supported: %s", label)
+ }
+ })
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error {
+ out.Replicas = int32(in.Replicas)
+
+ out.Selector = nil
+ out.TargetSelector = ""
+ if in.Selector != nil {
+ if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 {
+ out.Selector = in.Selector.MatchLabels
+ }
+
+ selector, err := unversioned.LabelSelectorAsSelector(in.Selector)
+ if err != nil {
+ return fmt.Errorf("invalid label selector: %v", err)
+ }
+ out.TargetSelector = selector.String()
+ }
+ return nil
+}
+
+func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+
+ // Normally when 2 fields map to the same internal value we favor the old field, since
+ // old clients can't be expected to know about new fields but clients that know about the
+ // new field can be expected to know about the old field (though that's not quite true, due
+ // to kubectl apply). However, these fields are read-only, so any non-nil value should work.
+ if in.TargetSelector != "" {
+ labelSelector, err := unversioned.ParseToLabelSelector(in.TargetSelector)
+ if err != nil {
+ out.Selector = nil
+ return fmt.Errorf("failed to parse target selector: %v", err)
+ }
+ out.Selector = labelSelector
+ } else if in.Selector != nil {
+ out.Selector = new(unversioned.LabelSelector)
+ selector := make(map[string]string)
+ for key, val := range in.Selector {
+ selector[key] = val
+ }
+ out.Selector.MatchLabels = selector
+ } else {
+ out.Selector = nil
+ }
+ return nil
+}
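+
+// Illustrative sketch, not part of the upstream code: when both the legacy map
+// selector and the string TargetSelector are set, the conversion above prefers
+// TargetSelector, since both fields are read-only views of the same data.
+// exampleScaleStatusToInternal is a hypothetical helper showing the call shape;
+// a nil scope is acceptable here because the conversion does not use it.
+func exampleScaleStatusToInternal(in *ScaleStatus) (*extensions.ScaleStatus, error) {
+ out := &extensions.ScaleStatus{}
+ if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in, out, nil); err != nil {
+  return nil, err
+ }
+ return out, nil
+}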
+
+func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error {
+ out.Replicas = &in.Replicas
+ if in.Selector != nil {
+ out.Selector = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
+ return err
+ }
+ if in.RevisionHistoryLimit != nil {
+ out.RevisionHistoryLimit = new(int32)
+ *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit)
+ }
+ out.MinReadySeconds = int32(in.MinReadySeconds)
+ out.Paused = in.Paused
+ if in.RollbackTo != nil {
+ out.RollbackTo = new(RollbackConfig)
+ out.RollbackTo.Revision = int64(in.RollbackTo.Revision)
+ } else {
+ out.RollbackTo = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
+ if in.Replicas != nil {
+ out.Replicas = *in.Replicas
+ }
+
+ if in.Selector != nil {
+ out.Selector = new(unversioned.LabelSelector)
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
+ return err
+ }
+ out.RevisionHistoryLimit = in.RevisionHistoryLimit
+ out.MinReadySeconds = in.MinReadySeconds
+ out.Paused = in.Paused
+ if in.RollbackTo != nil {
+ out.RollbackTo = new(extensions.RollbackConfig)
+ out.RollbackTo.Revision = in.RollbackTo.Revision
+ } else {
+ out.RollbackTo = nil
+ }
+ return nil
+}
+
+func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
+ out.Type = DeploymentStrategyType(in.Type)
+ if in.RollingUpdate != nil {
+ out.RollingUpdate = new(RollingUpdateDeployment)
+ if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
+ return err
+ }
+ } else {
+ out.RollingUpdate = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
+ out.Type = extensions.DeploymentStrategyType(in.Type)
+ if in.RollingUpdate != nil {
+ out.RollingUpdate = new(extensions.RollingUpdateDeployment)
+ if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
+ return err
+ }
+ } else {
+ out.RollingUpdate = nil
+ }
+ return nil
+}
+
+func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
+ if out.MaxUnavailable == nil {
+ out.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil {
+ return err
+ }
+ if out.MaxSurge == nil {
+ out.MaxSurge = &intstr.IntOrString{}
+ }
+ if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
+ if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error {
+ out.Replicas = new(int32)
+ *out.Replicas = int32(in.Replicas)
+ if in.Selector != nil {
+ out.Selector = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
+ if in.Replicas != nil {
+ out.Replicas = *in.Replicas
+ }
+ if in.Selector != nil {
+ out.Selector = new(unversioned.LabelSelector)
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector
+ if in.Selector != nil {
+ out.Selector = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+
+ // BEGIN non-standard conversion
+ // autoSelector has opposite meaning as manualSelector.
+ // in both cases, unset means false, and unset is always preferred to false.
+ // unset vs set-false distinction is not preserved.
+ manualSelector := in.ManualSelector != nil && *in.ManualSelector
+ autoSelector := !manualSelector
+ if autoSelector {
+ out.AutoSelector = new(bool)
+ *out.AutoSelector = true
+ } else {
+ out.AutoSelector = nil
+ }
+ // END non-standard conversion
+
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error {
+ out.Parallelism = in.Parallelism
+ out.Completions = in.Completions
+ out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
+ // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector
+ if in.Selector != nil {
+ out.Selector = new(unversioned.LabelSelector)
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+
+ // BEGIN non-standard conversion
+ // autoSelector has opposite meaning as manualSelector.
+ // in both cases, unset means false, and unset is always preferred to false.
+ // unset vs set-false distinction is not preserved.
+ autoSelector := bool(in.AutoSelector != nil && *in.AutoSelector)
+ manualSelector := !autoSelector
+ if manualSelector {
+ out.ManualSelector = new(bool)
+ *out.ManualSelector = true
+ } else {
+ out.ManualSelector = nil
+ }
+ // END non-standard conversion
+
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
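+
+// Illustrative sketch, not part of the upstream code: the non-standard conversions
+// above invert the selector flag and drop the unset-vs-false distinction. With
+// AutoSelector unset or false, ManualSelector comes back as a pointer to true; with
+// AutoSelector set to true, ManualSelector stays nil (the preferred "unset" form).
+// invertSelectorFlag is a hypothetical helper mirroring that logic.
+func invertSelectorFlag(auto *bool) *bool {
+ if auto != nil && *auto {
+  // Auto selection requested: leave the manual flag unset rather than false.
+  return nil
+ }
+ manual := true
+ return &manual
+}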
+
+func Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(in *autoscaling.CrossVersionObjectReference, out *SubresourceReference, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ out.Subresource = "scale"
+ return nil
+}
+
+func Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(in *SubresourceReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error {
+ if err := Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(&in.ScaleTargetRef, &out.ScaleRef, s); err != nil {
+ return err
+ }
+ if in.MinReplicas != nil {
+ out.MinReplicas = new(int32)
+ *out.MinReplicas = *in.MinReplicas
+ } else {
+ out.MinReplicas = nil
+ }
+ out.MaxReplicas = in.MaxReplicas
+ if in.TargetCPUUtilizationPercentage != nil {
+ out.CPUUtilization = &CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage}
+ }
+ return nil
+}
+
+func Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleRef, &out.ScaleTargetRef, s); err != nil {
+ return err
+ }
+ if in.MinReplicas != nil {
+ out.MinReplicas = new(int32)
+ *out.MinReplicas = int32(*in.MinReplicas)
+ } else {
+ out.MinReplicas = nil
+ }
+ out.MaxReplicas = int32(in.MaxReplicas)
+ if in.CPUUtilization != nil {
+ out.TargetCPUUtilizationPercentage = new(int32)
+ *out.TargetCPUUtilizationPercentage = int32(in.CPUUtilization.TargetPercentage)
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go
new file mode 100644
index 0000000..6f4ff53
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go
@@ -0,0 +1,2539 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
+ batch "k8s.io/kubernetes/pkg/apis/batch"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1beta1_APIVersion_To_extensions_APIVersion,
+ Convert_extensions_APIVersion_To_v1beta1_APIVersion,
+ Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus,
+ Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus,
+ Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList,
+ Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList,
+ Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget,
+ Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget,
+ Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList,
+ Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList,
+ Convert_v1beta1_DaemonSet_To_extensions_DaemonSet,
+ Convert_extensions_DaemonSet_To_v1beta1_DaemonSet,
+ Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList,
+ Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList,
+ Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec,
+ Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec,
+ Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus,
+ Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus,
+ Convert_v1beta1_Deployment_To_extensions_Deployment,
+ Convert_extensions_Deployment_To_v1beta1_Deployment,
+ Convert_v1beta1_DeploymentList_To_extensions_DeploymentList,
+ Convert_extensions_DeploymentList_To_v1beta1_DeploymentList,
+ Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback,
+ Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback,
+ Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec,
+ Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec,
+ Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus,
+ Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus,
+ Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy,
+ Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy,
+ Convert_v1beta1_ExportOptions_To_api_ExportOptions,
+ Convert_api_ExportOptions_To_v1beta1_ExportOptions,
+ Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions,
+ Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions,
+ Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath,
+ Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath,
+ Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue,
+ Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue,
+ Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler,
+ Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler,
+ Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList,
+ Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList,
+ Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec,
+ Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec,
+ Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus,
+ Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus,
+ Convert_v1beta1_HostPortRange_To_extensions_HostPortRange,
+ Convert_extensions_HostPortRange_To_v1beta1_HostPortRange,
+ Convert_v1beta1_IDRange_To_extensions_IDRange,
+ Convert_extensions_IDRange_To_v1beta1_IDRange,
+ Convert_v1beta1_Ingress_To_extensions_Ingress,
+ Convert_extensions_Ingress_To_v1beta1_Ingress,
+ Convert_v1beta1_IngressBackend_To_extensions_IngressBackend,
+ Convert_extensions_IngressBackend_To_v1beta1_IngressBackend,
+ Convert_v1beta1_IngressList_To_extensions_IngressList,
+ Convert_extensions_IngressList_To_v1beta1_IngressList,
+ Convert_v1beta1_IngressRule_To_extensions_IngressRule,
+ Convert_extensions_IngressRule_To_v1beta1_IngressRule,
+ Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue,
+ Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue,
+ Convert_v1beta1_IngressSpec_To_extensions_IngressSpec,
+ Convert_extensions_IngressSpec_To_v1beta1_IngressSpec,
+ Convert_v1beta1_IngressStatus_To_extensions_IngressStatus,
+ Convert_extensions_IngressStatus_To_v1beta1_IngressStatus,
+ Convert_v1beta1_IngressTLS_To_extensions_IngressTLS,
+ Convert_extensions_IngressTLS_To_v1beta1_IngressTLS,
+ Convert_v1beta1_Job_To_batch_Job,
+ Convert_batch_Job_To_v1beta1_Job,
+ Convert_v1beta1_JobCondition_To_batch_JobCondition,
+ Convert_batch_JobCondition_To_v1beta1_JobCondition,
+ Convert_v1beta1_JobList_To_batch_JobList,
+ Convert_batch_JobList_To_v1beta1_JobList,
+ Convert_v1beta1_JobSpec_To_batch_JobSpec,
+ Convert_batch_JobSpec_To_v1beta1_JobSpec,
+ Convert_v1beta1_JobStatus_To_batch_JobStatus,
+ Convert_batch_JobStatus_To_v1beta1_JobStatus,
+ Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector,
+ Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector,
+ Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement,
+ Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement,
+ Convert_v1beta1_ListOptions_To_api_ListOptions,
+ Convert_api_ListOptions_To_v1beta1_ListOptions,
+ Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy,
+ Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy,
+ Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule,
+ Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule,
+ Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList,
+ Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList,
+ Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer,
+ Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer,
+ Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort,
+ Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort,
+ Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec,
+ Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec,
+ Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy,
+ Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy,
+ Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList,
+ Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList,
+ Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec,
+ Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec,
+ Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet,
+ Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet,
+ Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList,
+ Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList,
+ Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec,
+ Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec,
+ Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus,
+ Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus,
+ Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy,
+ Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy,
+ Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig,
+ Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig,
+ Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
+ Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
+ Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions,
+ Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions,
+ Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions,
+ Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions,
+ Convert_v1beta1_Scale_To_extensions_Scale,
+ Convert_extensions_Scale_To_v1beta1_Scale,
+ Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec,
+ Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec,
+ Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus,
+ Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus,
+ Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions,
+ Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions,
+ Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource,
+ Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource,
+ Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData,
+ Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData,
+ Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList,
+ Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList,
+ Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList,
+ Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error {
+ out.Name = in.Name
+ return nil
+}
+
+func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error {
+ return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s)
+}
+
+func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error {
+ out.Name = in.Name
+ return nil
+}
+
+func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error {
+ return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.CurrentValue, &out.CurrentValue, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s)
+}
+
+func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.CurrentValue, &out.CurrentValue, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error {
+ return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error {
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.CustomMetricCurrentStatus, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error {
+ return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s)
+}
+
+func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error {
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CustomMetricCurrentStatus, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error {
+ return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.TargetValue, &out.TargetValue, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error {
+ return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s)
+}
+
+func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error {
+ out.Name = in.Name
+ if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.TargetValue, &out.TargetValue, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error {
+ return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error {
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.CustomMetricTarget, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error {
+ return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s)
+}
+
+func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error {
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CustomMetricTarget, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error {
+ return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s)
+}
+
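+// Conversion from the versioned type first applies the registered defaults
+// (SetDefaults_DaemonSet) and then copies TypeMeta and ObjectMeta; ObjectMeta
+// still goes through the generic s.Convert path, which is what the
+// "inefficient conversion" TODO comments refer to.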
+func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error {
+ SetDefaults_DaemonSet(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error {
+ return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s)
+}
+
+func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error {
+ return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s)
+}
+
+func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.DaemonSet, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error {
+ return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s)
+}
+
+func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DaemonSet, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error {
+ return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s)
+}
+
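+// Optional fields such as Selector are converted by allocating the target
+// pointer only when the source is non-nil; a nil source always maps to a nil
+// destination rather than an empty value.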
+func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error {
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s)
+}
+
+func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error {
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error {
+ return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error {
+ out.CurrentNumberScheduled = in.CurrentNumberScheduled
+ out.NumberMisscheduled = in.NumberMisscheduled
+ out.DesiredNumberScheduled = in.DesiredNumberScheduled
+ return nil
+}
+
+func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s)
+}
+
+func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error {
+ out.CurrentNumberScheduled = in.CurrentNumberScheduled
+ out.NumberMisscheduled = in.NumberMisscheduled
+ out.DesiredNumberScheduled = in.DesiredNumberScheduled
+ return nil
+}
+
+func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error {
+ return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error {
+ SetDefaults_Deployment(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error {
+ return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s)
+}
+
+func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error {
+ return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s)
+}
+
+func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.Deployment, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
+ return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s)
+}
+
+func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Deployment, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error {
+ return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s)
+}
+
+func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Name = in.Name
+ out.UpdatedAnnotations = in.UpdatedAnnotations
+ if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
+ return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s)
+}
+
+func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Name = in.Name
+ out.UpdatedAnnotations = in.UpdatedAnnotations
+ if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error {
+ return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
+}
+
+func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Replicas = in.Replicas
+ out.UpdatedReplicas = in.UpdatedReplicas
+ out.AvailableReplicas = in.AvailableReplicas
+ out.UnavailableReplicas = in.UnavailableReplicas
+ return nil
+}
+
+func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s)
+}
+
+func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Replicas = in.Replicas
+ out.UpdatedReplicas = in.UpdatedReplicas
+ out.AvailableReplicas = in.AvailableReplicas
+ out.UnavailableReplicas = in.UnavailableReplicas
+ return nil
+}
+
+func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error {
+ return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
+ out.Type = extensions.DeploymentStrategyType(in.Type)
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(extensions.RollingUpdateDeployment)
+ if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RollingUpdate = nil
+ }
+ return nil
+}
+
+func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
+ out.Type = DeploymentStrategyType(in.Type)
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDeployment)
+ if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RollingUpdate = nil
+ }
+ return nil
+}
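+
+// Note that no exported Convert_*_DeploymentStrategy wrappers are generated
+// here; presumably hand-written Convert_* functions elsewhere in the package
+// provide the exported entry points for this type and wrap these
+// autoConvert_* halves.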
+
+func autoConvert_v1beta1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func Convert_v1beta1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_ExportOptions_To_api_ExportOptions(in, out, s)
+}
+
+func autoConvert_api_ExportOptions_To_v1beta1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func Convert_api_ExportOptions_To_v1beta1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error {
+ return autoConvert_api_ExportOptions_To_v1beta1_ExportOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error {
+ out.Rule = extensions.FSGroupStrategyType(in.Rule)
+ if in.Ranges != nil {
+ in, out := &in.Ranges, &out.Ranges
+ *out = make([]extensions.IDRange, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in, out, s)
+}
+
+func autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error {
+ out.Rule = FSGroupStrategyType(in.Rule)
+ if in.Ranges != nil {
+ in, out := &in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error {
+ return autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error {
+ out.Path = in.Path
+ if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error {
+ return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s)
+}
+
+func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error {
+ out.Path = in.Path
+ if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error {
+ return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s)
+}
+
+func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error {
+ if in.Paths != nil {
+ in, out := &in.Paths, &out.Paths
+ *out = make([]extensions.HTTPIngressPath, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Paths = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error {
+ return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s)
+}
+
+func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error {
+ if in.Paths != nil {
+ in, out := &in.Paths, &out.Paths
+ *out = make([]HTTPIngressPath, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Paths = nil
+ }
+ return nil
+}
+
+func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error {
+ return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s)
+}
+
+func autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
+ SetDefaults_HorizontalPodAutoscaler(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in, out, s)
+}
+
+func autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
+ return autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HorizontalPodAutoscaler, len(*in))
+ for i := range *in {
+ if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in, out, s)
+}
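+
+// The HorizontalPodAutoscalerSpec conversions referenced above are not
+// generated in this file (they would appear here alphabetically), which
+// suggests they are provided by hand-written conversion functions elsewhere
+// in the package.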
+
+func autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.LastScaleTime = in.LastScaleTime
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage
+ return nil
+}
+
+func Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s)
+}
+
+func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.LastScaleTime = in.LastScaleTime
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage
+ return nil
+}
+
+func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error {
+ return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in, out, s)
+}
+
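+// The versioned HostPortRange uses int32 fields while the internal type uses
+// int, so values are explicitly widened here and narrowed again in the
+// reverse direction below.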
+func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error {
+ out.Min = int(in.Min)
+ out.Max = int(in.Max)
+ return nil
+}
+
+func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error {
+ return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s)
+}
+
+func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error {
+ out.Min = int32(in.Min)
+ out.Max = int32(in.Max)
+ return nil
+}
+
+func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error {
+ return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s)
+}
+
+func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error {
+ out.Min = in.Min
+ out.Max = in.Max
+ return nil
+}
+
+func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error {
+ return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s)
+}
+
+func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error {
+ out.Min = in.Min
+ out.Max = in.Max
+ return nil
+}
+
+func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error {
+ return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s)
+}
+
+func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error {
+ return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s)
+}
+
+func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error {
+ return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error {
+ out.ServiceName = in.ServiceName
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s)
+}
+
+func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error {
+ out.ServiceName = in.ServiceName
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error {
+ return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.Ingress, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Ingress_To_extensions_Ingress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s)
+}
+
+func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Ingress, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_Ingress_To_v1beta1_Ingress(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error {
+ return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error {
+ out.Host = in.Host
+ if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s)
+}
+
+func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error {
+ out.Host = in.Host
+ if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error {
+ return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error {
+ if in.HTTP != nil {
+ in, out := &in.HTTP, &out.HTTP
+ *out = new(extensions.HTTPIngressRuleValue)
+ if err := Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HTTP = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s)
+}
+
+func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error {
+ if in.HTTP != nil {
+ in, out := &in.HTTP, &out.HTTP
+ *out = new(HTTPIngressRuleValue)
+ if err := Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.HTTP = nil
+ }
+ return nil
+}
+
+func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error {
+ return autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error {
+ if in.Backend != nil {
+ in, out := &in.Backend, &out.Backend
+ *out = new(extensions.IngressBackend)
+ if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Backend = nil
+ }
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = make([]extensions.IngressTLS, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.TLS = nil
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]extensions.IngressRule, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_IngressRule_To_extensions_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s)
+}
+
+func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error {
+ if in.Backend != nil {
+ in, out := &in.Backend, &out.Backend
+ *out = new(IngressBackend)
+ if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Backend = nil
+ }
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = make([]IngressTLS, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.TLS = nil
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]IngressRule, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_IngressRule_To_v1beta1_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error {
+ return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error {
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s)
+}
+
+func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error {
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error {
+ return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error {
+ out.Hosts = in.Hosts
+ out.SecretName = in.SecretName
+ return nil
+}
+
+func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s)
+}
+
+func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error {
+ out.Hosts = in.Hosts
+ out.SecretName = in.SecretName
+ return nil
+}
+
+func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error {
+ return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s)
+}
+
+func autoConvert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error {
+ SetDefaults_Job(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error {
+ return autoConvert_v1beta1_Job_To_batch_Job(in, out, s)
+}
+
+func autoConvert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobSpec_To_v1beta1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_batch_JobStatus_To_v1beta1_JobStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error {
+ return autoConvert_batch_Job_To_v1beta1_Job(in, out, s)
+}
+
+func autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error {
+ out.Type = batch.JobConditionType(in.Type)
+ out.Status = api.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error {
+ return autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in, out, s)
+}
+
+func autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error {
+ out.Type = JobConditionType(in.Type)
+ out.Status = v1.ConditionStatus(in.Status)
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {
+ return err
+ }
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func Convert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error {
+ return autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in, out, s)
+}
+
+func autoConvert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]batch.Job, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error {
+ return autoConvert_v1beta1_JobList_To_batch_JobList(in, out, s)
+}
+
+func autoConvert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Job, len(*in))
+ for i := range *in {
+ if err := Convert_batch_Job_To_v1beta1_Job(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error {
+ return autoConvert_batch_JobList_To_v1beta1_JobList(in, out, s)
+}
+
+func autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]batch.JobCondition, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.StartTime = in.StartTime
+ out.CompletionTime = in.CompletionTime
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func Convert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in, out, s)
+}
+
+func autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error {
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(*in))
+ for i := range *in {
+ if err := Convert_batch_JobCondition_To_v1beta1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ out.StartTime = in.StartTime
+ out.CompletionTime = in.CompletionTime
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func Convert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error {
+ return autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error {
+ out.MatchLabels = in.MatchLabels
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]unversioned.LabelSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error {
+ return autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in, out, s)
+}
+
+func autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
+ out.MatchLabels = in.MatchLabels
+ if in.MatchExpressions != nil {
+ in, out := &in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(*in))
+ for i := range *in {
+ if err := Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
+ return autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in, out, s)
+}
+
+func autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = unversioned.LabelSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s)
+}
+
+func autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
+ out.Key = in.Key
+ out.Operator = LabelSelectorOperator(in.Operator)
+ out.Values = in.Values
+ return nil
+}
+
+func Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
+ return autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in, out, s)
+}
+
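+// ListOptions conversion translates the versioned selector strings into the
+// internal typed labels/fields Selector values via the api helpers, and back
+// to strings in the reverse direction.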
+func autoConvert_v1beta1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+ return err
+ }
+ if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+ return err
+ }
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ out.TimeoutSeconds = in.TimeoutSeconds
+ return nil
+}
+
+func Convert_v1beta1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_ListOptions_To_api_ListOptions(in, out, s)
+}
+
+func autoConvert_api_ListOptions_To_v1beta1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+ return err
+ }
+ if err := api.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+ return err
+ }
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ out.TimeoutSeconds = in.TimeoutSeconds
+ return nil
+}
+
+func Convert_api_ListOptions_To_v1beta1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error {
+ return autoConvert_api_ListOptions_To_v1beta1_ListOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error {
+ SetDefaults_NetworkPolicy(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s)
+}
+
+func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error {
+ return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s)
+}
+
+func autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error {
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]extensions.NetworkPolicyPort, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = make([]extensions.NetworkPolicyPeer, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.From = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in, out, s)
+}
+
+func autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error {
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]NetworkPolicyPort, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = make([]NetworkPolicyPeer, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.From = nil
+ }
+ return nil
+}
+
+func Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error {
+ return autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s)
+}
+
+func autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.NetworkPolicy, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in, out, s)
+}
+
+func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NetworkPolicy, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error {
+ return autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s)
+}
+
+func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error {
+ if in.PodSelector != nil {
+ in, out := &in.PodSelector, &out.PodSelector
+ *out = new(unversioned.LabelSelector)
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PodSelector = nil
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(unversioned.LabelSelector)
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NamespaceSelector = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in, out, s)
+}
+
+func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error {
+ if in.PodSelector != nil {
+ in, out := &in.PodSelector, &out.PodSelector
+ *out = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.PodSelector = nil
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(LabelSelector)
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.NamespaceSelector = nil
+ }
+ return nil
+}
+
+func Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error {
+ return autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s)
+}
+
+func autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error {
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(api.Protocol)
+ **out = api.Protocol(**in)
+ } else {
+ out.Protocol = nil
+ }
+ out.Port = in.Port
+ return nil
+}
+
+func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in, out, s)
+}
+
+func autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error {
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(v1.Protocol)
+ **out = v1.Protocol(**in)
+ } else {
+ out.Protocol = nil
+ }
+ out.Port = in.Port
+ return nil
+}
+
+func Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error {
+ return autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s)
+}
+
+func autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(&in.PodSelector, &out.PodSelector, s); err != nil {
+ return err
+ }
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]extensions.NetworkPolicyIngressRule, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in, out, s)
+}
+
+func autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error {
+ if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(&in.PodSelector, &out.PodSelector, s); err != nil {
+ return err
+ }
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]NetworkPolicyIngressRule, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error {
+ return autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error {
+ return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s)
+}
+
+func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error {
+ return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s)
+}
+
+func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.PodSecurityPolicy, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error {
+ return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s)
+}
+
+func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PodSecurityPolicy, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error {
+ return autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s)
+}
+
+func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error {
+ out.Privileged = in.Privileged
+ if in.DefaultAddCapabilities != nil {
+ in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
+ *out = make([]api.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = api.Capability((*in)[i])
+ }
+ } else {
+ out.DefaultAddCapabilities = nil
+ }
+ if in.RequiredDropCapabilities != nil {
+ in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
+ *out = make([]api.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = api.Capability((*in)[i])
+ }
+ } else {
+ out.RequiredDropCapabilities = nil
+ }
+ if in.AllowedCapabilities != nil {
+ in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
+ *out = make([]api.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = api.Capability((*in)[i])
+ }
+ } else {
+ out.AllowedCapabilities = nil
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]extensions.FSType, len(*in))
+ for i := range *in {
+ (*out)[i] = extensions.FSType((*in)[i])
+ }
+ } else {
+ out.Volumes = nil
+ }
+ out.HostNetwork = in.HostNetwork
+ if in.HostPorts != nil {
+ in, out := &in.HostPorts, &out.HostPorts
+ *out = make([]extensions.HostPortRange, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.HostPorts = nil
+ }
+ out.HostPID = in.HostPID
+ out.HostIPC = in.HostIPC
+ if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil {
+ return err
+ }
+ out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem
+ return nil
+}
+
+func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s)
+}
+
+func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error {
+ out.Privileged = in.Privileged
+ if in.DefaultAddCapabilities != nil {
+ in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
+ *out = make([]v1.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = v1.Capability((*in)[i])
+ }
+ } else {
+ out.DefaultAddCapabilities = nil
+ }
+ if in.RequiredDropCapabilities != nil {
+ in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
+ *out = make([]v1.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = v1.Capability((*in)[i])
+ }
+ } else {
+ out.RequiredDropCapabilities = nil
+ }
+ if in.AllowedCapabilities != nil {
+ in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
+ *out = make([]v1.Capability, len(*in))
+ for i := range *in {
+ (*out)[i] = v1.Capability((*in)[i])
+ }
+ } else {
+ out.AllowedCapabilities = nil
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]FSType, len(*in))
+ for i := range *in {
+ (*out)[i] = FSType((*in)[i])
+ }
+ } else {
+ out.Volumes = nil
+ }
+ out.HostNetwork = in.HostNetwork
+ if in.HostPorts != nil {
+ in, out := &in.HostPorts, &out.HostPorts
+ *out = make([]HostPortRange, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.HostPorts = nil
+ }
+ out.HostPID = in.HostPID
+ out.HostIPC = in.HostIPC
+ if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil {
+ return err
+ }
+ out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem
+ return nil
+}
+
+func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error {
+ return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error {
+ SetDefaults_ReplicaSet(in)
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error {
+ return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s)
+}
+
+func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error {
+ return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s)
+}
+
+func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.ReplicaSet, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s)
+}
+
+func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ReplicaSet, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error {
+ return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s)
+}
+
+func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s)
+}
+
+func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error {
+ return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error {
+ return autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s)
+}
+
+func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error {
+ return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s)
+}
+
+func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
+ out.Revision = in.Revision
+ return nil
+}
+
+func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s)
+}
+
+func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error {
+ out.Revision = in.Revision
+ return nil
+}
+
+func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error {
+ return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error {
+ out.Rule = extensions.RunAsUserStrategy(in.Rule)
+ if in.Ranges != nil {
+ in, out := &in.Ranges, &out.Ranges
+ *out = make([]extensions.IDRange, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in, out, s)
+}
+
+func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error {
+ out.Rule = RunAsUserStrategy(in.Rule)
+ if in.Ranges != nil {
+ in, out := &in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error {
+ return autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error {
+ out.Rule = extensions.SELinuxStrategy(in.Rule)
+ if in.SELinuxOptions != nil {
+ in, out := &in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(api.SELinuxOptions)
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(*in, *out, 0); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in, out, s)
+}
+
+func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error {
+ out.Rule = SELinuxStrategy(in.Rule)
+ if in.SELinuxOptions != nil {
+ in, out := &in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(v1.SELinuxOptions)
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(*in, *out, 0); err != nil {
+ return err
+ }
+ } else {
+ out.SELinuxOptions = nil
+ }
+ return nil
+}
+
+func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error {
+ return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error {
+ return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s)
+}
+
+func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error {
+ return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s)
+}
+
+func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
+ return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error {
+ out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule)
+ if in.Ranges != nil {
+ in, out := &in.Ranges, &out.Ranges
+ *out = make([]extensions.IDRange, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in, out, s)
+}
+
+func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error {
+ out.Rule = SupplementalGroupsStrategyType(in.Rule)
+ if in.Ranges != nil {
+ in, out := &in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error {
+ return autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ out.Description = in.Description
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]extensions.APIVersion, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_APIVersion_To_extensions_APIVersion(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Versions = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error {
+ return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s)
+}
+
+func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ out.Description = in.Description
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]APIVersion, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_APIVersion_To_v1beta1_APIVersion(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Versions = nil
+ }
+ return nil
+}
+
+func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
+ return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s)
+}
+
+func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error {
+ return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s)
+}
+
+func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
+ return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s)
+}
+
+func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.ThirdPartyResourceData, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s)
+}
+
+func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ThirdPartyResourceData, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
+ return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s)
+}
+
+func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]extensions.ThirdPartyResource, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error {
+ return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s)
+}
+
+func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ThirdPartyResource, len(*in))
+ for i := range *in {
+ if err := Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
+ return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go
new file mode 100644
index 0000000..b7e43ed
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go
@@ -0,0 +1,1170 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ intstr "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1beta1_APIVersion,
+ DeepCopy_v1beta1_CPUTargetUtilization,
+ DeepCopy_v1beta1_CustomMetricCurrentStatus,
+ DeepCopy_v1beta1_CustomMetricCurrentStatusList,
+ DeepCopy_v1beta1_CustomMetricTarget,
+ DeepCopy_v1beta1_CustomMetricTargetList,
+ DeepCopy_v1beta1_DaemonSet,
+ DeepCopy_v1beta1_DaemonSetList,
+ DeepCopy_v1beta1_DaemonSetSpec,
+ DeepCopy_v1beta1_DaemonSetStatus,
+ DeepCopy_v1beta1_Deployment,
+ DeepCopy_v1beta1_DeploymentList,
+ DeepCopy_v1beta1_DeploymentRollback,
+ DeepCopy_v1beta1_DeploymentSpec,
+ DeepCopy_v1beta1_DeploymentStatus,
+ DeepCopy_v1beta1_DeploymentStrategy,
+ DeepCopy_v1beta1_ExportOptions,
+ DeepCopy_v1beta1_FSGroupStrategyOptions,
+ DeepCopy_v1beta1_HTTPIngressPath,
+ DeepCopy_v1beta1_HTTPIngressRuleValue,
+ DeepCopy_v1beta1_HorizontalPodAutoscaler,
+ DeepCopy_v1beta1_HorizontalPodAutoscalerList,
+ DeepCopy_v1beta1_HorizontalPodAutoscalerSpec,
+ DeepCopy_v1beta1_HorizontalPodAutoscalerStatus,
+ DeepCopy_v1beta1_HostPortRange,
+ DeepCopy_v1beta1_IDRange,
+ DeepCopy_v1beta1_Ingress,
+ DeepCopy_v1beta1_IngressBackend,
+ DeepCopy_v1beta1_IngressList,
+ DeepCopy_v1beta1_IngressRule,
+ DeepCopy_v1beta1_IngressRuleValue,
+ DeepCopy_v1beta1_IngressSpec,
+ DeepCopy_v1beta1_IngressStatus,
+ DeepCopy_v1beta1_IngressTLS,
+ DeepCopy_v1beta1_Job,
+ DeepCopy_v1beta1_JobCondition,
+ DeepCopy_v1beta1_JobList,
+ DeepCopy_v1beta1_JobSpec,
+ DeepCopy_v1beta1_JobStatus,
+ DeepCopy_v1beta1_LabelSelector,
+ DeepCopy_v1beta1_LabelSelectorRequirement,
+ DeepCopy_v1beta1_ListOptions,
+ DeepCopy_v1beta1_NetworkPolicy,
+ DeepCopy_v1beta1_NetworkPolicyIngressRule,
+ DeepCopy_v1beta1_NetworkPolicyList,
+ DeepCopy_v1beta1_NetworkPolicyPeer,
+ DeepCopy_v1beta1_NetworkPolicyPort,
+ DeepCopy_v1beta1_NetworkPolicySpec,
+ DeepCopy_v1beta1_PodSecurityPolicy,
+ DeepCopy_v1beta1_PodSecurityPolicyList,
+ DeepCopy_v1beta1_PodSecurityPolicySpec,
+ DeepCopy_v1beta1_ReplicaSet,
+ DeepCopy_v1beta1_ReplicaSetList,
+ DeepCopy_v1beta1_ReplicaSetSpec,
+ DeepCopy_v1beta1_ReplicaSetStatus,
+ DeepCopy_v1beta1_ReplicationControllerDummy,
+ DeepCopy_v1beta1_RollbackConfig,
+ DeepCopy_v1beta1_RollingUpdateDeployment,
+ DeepCopy_v1beta1_RunAsUserStrategyOptions,
+ DeepCopy_v1beta1_SELinuxStrategyOptions,
+ DeepCopy_v1beta1_Scale,
+ DeepCopy_v1beta1_ScaleSpec,
+ DeepCopy_v1beta1_ScaleStatus,
+ DeepCopy_v1beta1_SubresourceReference,
+ DeepCopy_v1beta1_SupplementalGroupsStrategyOptions,
+ DeepCopy_v1beta1_ThirdPartyResource,
+ DeepCopy_v1beta1_ThirdPartyResourceData,
+ DeepCopy_v1beta1_ThirdPartyResourceDataList,
+ DeepCopy_v1beta1_ThirdPartyResourceList,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1beta1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
+ out.Name = in.Name
+ return nil
+}
+
+func DeepCopy_v1beta1_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error {
+ out.TargetPercentage = in.TargetPercentage
+ return nil
+}
+
+func DeepCopy_v1beta1_CustomMetricCurrentStatus(in CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.CurrentValue = in.CurrentValue.DeepCopy()
+ return nil
+}
+
+func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, c *conversion.Cloner) error {
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]CustomMetricCurrentStatus, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_CustomMetricCurrentStatus(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_CustomMetricTarget(in CustomMetricTarget, out *CustomMetricTarget, c *conversion.Cloner) error {
+ out.Name = in.Name
+ out.TargetValue = in.TargetValue.DeepCopy()
+ return nil
+}
+
+func DeepCopy_v1beta1_CustomMetricTargetList(in CustomMetricTargetList, out *CustomMetricTargetList, c *conversion.Cloner) error {
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]CustomMetricTarget, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_CustomMetricTarget(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1beta1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]DaemonSet, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_DaemonSet(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
+ out.CurrentNumberScheduled = in.CurrentNumberScheduled
+ out.NumberMisscheduled = in.NumberMisscheduled
+ out.DesiredNumberScheduled = in.DesiredNumberScheduled
+ return nil
+}
+
+func DeepCopy_v1beta1_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1beta1_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Deployment, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_Deployment(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Name = in.Name
+ if in.UpdatedAnnotations != nil {
+ in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.UpdatedAnnotations = nil
+ }
+ out.RollbackTo = in.RollbackTo
+ return nil
+}
+
+func DeepCopy_v1beta1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
+ if in.Replicas != nil {
+ in, out := in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Replicas = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
+ return err
+ }
+ out.MinReadySeconds = in.MinReadySeconds
+ if in.RevisionHistoryLimit != nil {
+ in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.RevisionHistoryLimit = nil
+ }
+ out.Paused = in.Paused
+ if in.RollbackTo != nil {
+ in, out := in.RollbackTo, &out.RollbackTo
+ *out = new(RollbackConfig)
+ **out = *in
+ } else {
+ out.RollbackTo = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Replicas = in.Replicas
+ out.UpdatedReplicas = in.UpdatedReplicas
+ out.AvailableReplicas = in.AvailableReplicas
+ out.UnavailableReplicas = in.UnavailableReplicas
+ return nil
+}
+
+func DeepCopy_v1beta1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
+ out.Type = in.Type
+ if in.RollingUpdate != nil {
+ in, out := in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdateDeployment)
+ if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.RollingUpdate = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.Export = in.Export
+ out.Exact = in.Exact
+ return nil
+}
+
+func DeepCopy_v1beta1_FSGroupStrategyOptions(in FSGroupStrategyOptions, out *FSGroupStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.Ranges != nil {
+ in, out := in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error {
+ out.Path = in.Path
+ out.Backend = in.Backend
+ return nil
+}
+
+func DeepCopy_v1beta1_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error {
+ if in.Paths != nil {
+ in, out := in.Paths, &out.Paths
+ *out = make([]HTTPIngressPath, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Paths = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]HorizontalPodAutoscaler, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
+ out.ScaleRef = in.ScaleRef
+ if in.MinReplicas != nil {
+ in, out := in.MinReplicas, &out.MinReplicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.MinReplicas = nil
+ }
+ out.MaxReplicas = in.MaxReplicas
+ if in.CPUUtilization != nil {
+ in, out := in.CPUUtilization, &out.CPUUtilization
+ *out = new(CPUTargetUtilization)
+ **out = *in
+ } else {
+ out.CPUUtilization = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
+ if in.ObservedGeneration != nil {
+ in, out := in.ObservedGeneration, &out.ObservedGeneration
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ObservedGeneration = nil
+ }
+ if in.LastScaleTime != nil {
+ in, out := in.LastScaleTime, &out.LastScaleTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.LastScaleTime = nil
+ }
+ out.CurrentReplicas = in.CurrentReplicas
+ out.DesiredReplicas = in.DesiredReplicas
+ if in.CurrentCPUUtilizationPercentage != nil {
+ in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.CurrentCPUUtilizationPercentage = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error {
+ out.Min = in.Min
+ out.Max = in.Max
+ return nil
+}
+
+func DeepCopy_v1beta1_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error {
+ out.Min = in.Min
+ out.Max = in.Max
+ return nil
+}
+
+func DeepCopy_v1beta1_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_IngressSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_IngressStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error {
+ out.ServiceName = in.ServiceName
+ out.ServicePort = in.ServicePort
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Ingress, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_Ingress(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error {
+ out.Host = in.Host
+ if err := DeepCopy_v1beta1_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error {
+ if in.HTTP != nil {
+ in, out := in.HTTP, &out.HTTP
+ *out = new(HTTPIngressRuleValue)
+ if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.HTTP = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error {
+ if in.Backend != nil {
+ in, out := in.Backend, &out.Backend
+ *out = new(IngressBackend)
+ **out = *in
+ } else {
+ out.Backend = nil
+ }
+ if in.TLS != nil {
+ in, out := in.TLS, &out.TLS
+ *out = make([]IngressTLS, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_IngressTLS(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.TLS = nil
+ }
+ if in.Rules != nil {
+ in, out := in.Rules, &out.Rules
+ *out = make([]IngressRule, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_IngressRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error {
+ if err := v1.DeepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error {
+ if in.Hosts != nil {
+ in, out := in.Hosts, &out.Hosts
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Hosts = nil
+ }
+ out.SecretName = in.SecretName
+ return nil
+}
+
+func DeepCopy_v1beta1_Job(in Job, out *Job, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_JobSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_JobStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
+ out.Type = in.Type
+ out.Status = in.Status
+ out.LastProbeTime = in.LastProbeTime.DeepCopy()
+ out.LastTransitionTime = in.LastTransitionTime.DeepCopy()
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+func DeepCopy_v1beta1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Job, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_Job(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
+ if in.Parallelism != nil {
+ in, out := in.Parallelism, &out.Parallelism
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Parallelism = nil
+ }
+ if in.Completions != nil {
+ in, out := in.Completions, &out.Completions
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Completions = nil
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.ActiveDeadlineSeconds = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if in.AutoSelector != nil {
+ in, out := in.AutoSelector, &out.AutoSelector
+ *out = new(bool)
+ **out = *in
+ } else {
+ out.AutoSelector = nil
+ }
+ if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
+ if in.Conditions != nil {
+ in, out := in.Conditions, &out.Conditions
+ *out = make([]JobCondition, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_JobCondition(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Conditions = nil
+ }
+ if in.StartTime != nil {
+ in, out := in.StartTime, &out.StartTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.StartTime = nil
+ }
+ if in.CompletionTime != nil {
+ in, out := in.CompletionTime, &out.CompletionTime
+ *out = new(unversioned.Time)
+ **out = in.DeepCopy()
+ } else {
+ out.CompletionTime = nil
+ }
+ out.Active = in.Active
+ out.Succeeded = in.Succeeded
+ out.Failed = in.Failed
+ return nil
+}
+
+func DeepCopy_v1beta1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error {
+ if in.MatchLabels != nil {
+ in, out := in.MatchLabels, &out.MatchLabels
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.MatchLabels = nil
+ }
+ if in.MatchExpressions != nil {
+ in, out := in.MatchExpressions, &out.MatchExpressions
+ *out = make([]LabelSelectorRequirement, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.MatchExpressions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error {
+ out.Key = in.Key
+ out.Operator = in.Operator
+ if in.Values != nil {
+ in, out := in.Values, &out.Values
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Values = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.LabelSelector = in.LabelSelector
+ out.FieldSelector = in.FieldSelector
+ out.Watch = in.Watch
+ out.ResourceVersion = in.ResourceVersion
+ if in.TimeoutSeconds != nil {
+ in, out := in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = *in
+ } else {
+ out.TimeoutSeconds = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_NetworkPolicy(in NetworkPolicy, out *NetworkPolicy, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_NetworkPolicySpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_NetworkPolicyIngressRule(in NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, c *conversion.Cloner) error {
+ if in.Ports != nil {
+ in, out := in.Ports, &out.Ports
+ *out = make([]NetworkPolicyPort, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_NetworkPolicyPort(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ports = nil
+ }
+ if in.From != nil {
+ in, out := in.From, &out.From
+ *out = make([]NetworkPolicyPeer, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_NetworkPolicyPeer(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.From = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_NetworkPolicyList(in NetworkPolicyList, out *NetworkPolicyList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]NetworkPolicy, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_NetworkPolicy(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_NetworkPolicyPeer(in NetworkPolicyPeer, out *NetworkPolicyPeer, c *conversion.Cloner) error {
+ if in.PodSelector != nil {
+ in, out := in.PodSelector, &out.PodSelector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.PodSelector = nil
+ }
+ if in.NamespaceSelector != nil {
+ in, out := in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.NamespaceSelector = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_NetworkPolicyPort(in NetworkPolicyPort, out *NetworkPolicyPort, c *conversion.Cloner) error {
+ if in.Protocol != nil {
+ in, out := in.Protocol, &out.Protocol
+ *out = new(v1.Protocol)
+ **out = *in
+ } else {
+ out.Protocol = nil
+ }
+ if in.Port != nil {
+ in, out := in.Port, &out.Port
+ *out = new(intstr.IntOrString)
+ **out = *in
+ } else {
+ out.Port = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_NetworkPolicySpec(in NetworkPolicySpec, out *NetworkPolicySpec, c *conversion.Cloner) error {
+ if err := DeepCopy_v1beta1_LabelSelector(in.PodSelector, &out.PodSelector, c); err != nil {
+ return err
+ }
+ if in.Ingress != nil {
+ in, out := in.Ingress, &out.Ingress
+ *out = make([]NetworkPolicyIngressRule, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_NetworkPolicyIngressRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Ingress = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PodSecurityPolicy, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_PodSecurityPolicy(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error {
+ out.Privileged = in.Privileged
+ if in.DefaultAddCapabilities != nil {
+ in, out := in.DefaultAddCapabilities, &out.DefaultAddCapabilities
+ *out = make([]v1.Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.DefaultAddCapabilities = nil
+ }
+ if in.RequiredDropCapabilities != nil {
+ in, out := in.RequiredDropCapabilities, &out.RequiredDropCapabilities
+ *out = make([]v1.Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.RequiredDropCapabilities = nil
+ }
+ if in.AllowedCapabilities != nil {
+ in, out := in.AllowedCapabilities, &out.AllowedCapabilities
+ *out = make([]v1.Capability, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.AllowedCapabilities = nil
+ }
+ if in.Volumes != nil {
+ in, out := in.Volumes, &out.Volumes
+ *out = make([]FSType, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Volumes = nil
+ }
+ out.HostNetwork = in.HostNetwork
+ if in.HostPorts != nil {
+ in, out := in.HostPorts, &out.HostPorts
+ *out = make([]HostPortRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.HostPorts = nil
+ }
+ out.HostPID = in.HostPID
+ out.HostIPC = in.HostIPC
+ if err := DeepCopy_v1beta1_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in.SupplementalGroups, &out.SupplementalGroups, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_FSGroupStrategyOptions(in.FSGroup, &out.FSGroup, c); err != nil {
+ return err
+ }
+ out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem
+ return nil
+}
+
+func DeepCopy_v1beta1_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1beta1_ReplicaSetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1beta1_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ReplicaSet, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_ReplicaSet(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error {
+ if in.Replicas != nil {
+ in, out := in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = *in
+ } else {
+ out.Replicas = nil
+ }
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(LabelSelector)
+ if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ out.FullyLabeledReplicas = in.FullyLabeledReplicas
+ out.ObservedGeneration = in.ObservedGeneration
+ return nil
+}
+
+func DeepCopy_v1beta1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ return nil
+}
+
+func DeepCopy_v1beta1_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error {
+ out.Revision = in.Revision
+ return nil
+}
+
+func DeepCopy_v1beta1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
+ if in.MaxUnavailable != nil {
+ in, out := in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = *in
+ } else {
+ out.MaxUnavailable = nil
+ }
+ if in.MaxSurge != nil {
+ in, out := in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = *in
+ } else {
+ out.MaxSurge = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.Ranges != nil {
+ in, out := in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.SELinuxOptions != nil {
+ in, out := in.SELinuxOptions, &out.SELinuxOptions
+ *out = new(v1.SELinuxOptions)
+ **out = *in
+ } else {
+ out.SELinuxOptions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Spec = in.Spec
+ if err := DeepCopy_v1beta1_ScaleStatus(in.Status, &out.Status, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ return nil
+}
+
+func DeepCopy_v1beta1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
+ out.Replicas = in.Replicas
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = make(map[string]string)
+ for key, val := range in {
+ (*out)[key] = val
+ }
+ } else {
+ out.Selector = nil
+ }
+ out.TargetSelector = in.TargetSelector
+ return nil
+}
+
+func DeepCopy_v1beta1_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.Name = in.Name
+ out.APIVersion = in.APIVersion
+ out.Subresource = in.Subresource
+ return nil
+}
+
+func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, c *conversion.Cloner) error {
+ out.Rule = in.Rule
+ if in.Ranges != nil {
+ in, out := in.Ranges, &out.Ranges
+ *out = make([]IDRange, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Ranges = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ out.Description = in.Description
+ if in.Versions != nil {
+ in, out := in.Versions, &out.Versions
+ *out = make([]APIVersion, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Versions = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Data != nil {
+ in, out := in.Data, &out.Data
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Data = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ThirdPartyResourceData, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_ThirdPartyResourceData(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1beta1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ThirdPartyResource, len(in))
+ for i := range in {
+ if err := DeepCopy_v1beta1_ThirdPartyResource(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
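All of the generated DeepCopy_* functions above follow one pattern: value fields are assigned directly, pointer fields are nil-checked and re-allocated, and slices and maps are re-made and filled element by element so the copy shares no memory with the original. A minimal, self-contained Go sketch of that pattern, using toy Rule/Spec types rather than the vendored API structs:

package main

import "fmt"

// Toy types standing in for the vendored API structs.
type Rule struct{ Host string }

type Spec struct {
    Replicas *int32            // pointer field
    Rules    []Rule            // slice field
    Labels   map[string]string // map field
}

// deepCopySpec mirrors the generated DeepCopy_* shape:
// nil-check pointers, re-allocate slices and maps, copy element-wise.
func deepCopySpec(in Spec, out *Spec) {
    if in.Replicas != nil {
        out.Replicas = new(int32)
        *out.Replicas = *in.Replicas
    } else {
        out.Replicas = nil
    }
    if in.Rules != nil {
        out.Rules = make([]Rule, len(in.Rules))
        copy(out.Rules, in.Rules) // Rule has no reference fields, so copy is enough
    } else {
        out.Rules = nil
    }
    if in.Labels != nil {
        out.Labels = make(map[string]string, len(in.Labels))
        for k, v := range in.Labels {
            out.Labels[k] = v
        }
    } else {
        out.Labels = nil
    }
}

func main() {
    n := int32(3)
    src := Spec{Replicas: &n, Rules: []Rule{{Host: "a"}}, Labels: map[string]string{"app": "demo"}}
    var dst Spec
    deepCopySpec(src, &dst)
    *src.Replicas = 5          // mutating the source...
    fmt.Println(*dst.Replicas) // ...does not affect the copy: prints 3
}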
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go
new file mode 100644
index 0000000..ab6a202
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+ scheme.AddDefaultingFuncs(
+ SetDefaults_DaemonSet,
+ SetDefaults_Deployment,
+ SetDefaults_Job,
+ SetDefaults_HorizontalPodAutoscaler,
+ SetDefaults_ReplicaSet,
+ SetDefaults_NetworkPolicy,
+ )
+}
+
+func SetDefaults_DaemonSet(obj *DaemonSet) {
+ labels := obj.Spec.Template.Labels
+
+ // TODO: support templates defined elsewhere when we support them in the API
+ if labels != nil {
+ if obj.Spec.Selector == nil {
+ obj.Spec.Selector = &LabelSelector{
+ MatchLabels: labels,
+ }
+ }
+ if len(obj.Labels) == 0 {
+ obj.Labels = labels
+ }
+ }
+}
+
+func SetDefaults_Deployment(obj *Deployment) {
+ // Default labels and selector to labels from pod template spec.
+ labels := obj.Spec.Template.Labels
+
+ if labels != nil {
+ if obj.Spec.Selector == nil {
+ obj.Spec.Selector = &LabelSelector{MatchLabels: labels}
+ }
+ if len(obj.Labels) == 0 {
+ obj.Labels = labels
+ }
+ }
+ // Set DeploymentSpec.Replicas to 1 if it is not set.
+ if obj.Spec.Replicas == nil {
+ obj.Spec.Replicas = new(int32)
+ *obj.Spec.Replicas = 1
+ }
+ strategy := &obj.Spec.Strategy
+ // Set default DeploymentStrategyType as RollingUpdate.
+ if strategy.Type == "" {
+ strategy.Type = RollingUpdateDeploymentStrategyType
+ }
+ if strategy.Type == RollingUpdateDeploymentStrategyType {
+ if strategy.RollingUpdate == nil {
+ rollingUpdate := RollingUpdateDeployment{}
+ strategy.RollingUpdate = &rollingUpdate
+ }
+ if strategy.RollingUpdate.MaxUnavailable == nil {
+ // Default MaxUnavailable to 1.
+ maxUnavailable := intstr.FromInt(1)
+ strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
+ }
+ if strategy.RollingUpdate.MaxSurge == nil {
+ // Default MaxSurge to 1.
+ maxSurge := intstr.FromInt(1)
+ strategy.RollingUpdate.MaxSurge = &maxSurge
+ }
+ }
+}
+
+func SetDefaults_Job(obj *Job) {
+ labels := obj.Spec.Template.Labels
+ // TODO: support templates defined elsewhere when we support them in the API
+ if labels != nil {
+ // if an autoselector is requested, we'll build the selector later with controller-uid and job-name
+ autoSelector := bool(obj.Spec.AutoSelector != nil && *obj.Spec.AutoSelector)
+
+ // otherwise, we are using a manual selector
+ manualSelector := !autoSelector
+
+ // and default behavior for an unspecified manual selector is to use the pod template labels
+ if manualSelector && obj.Spec.Selector == nil {
+ obj.Spec.Selector = &LabelSelector{
+ MatchLabels: labels,
+ }
+ }
+ if len(obj.Labels) == 0 {
+ obj.Labels = labels
+ }
+ }
+ // For a non-parallel job, you can leave both `.spec.completions` and
+ // `.spec.parallelism` unset. When both are unset, both are defaulted to 1.
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
+ obj.Spec.Completions = new(int32)
+ *obj.Spec.Completions = 1
+ obj.Spec.Parallelism = new(int32)
+ *obj.Spec.Parallelism = 1
+ }
+ if obj.Spec.Parallelism == nil {
+ obj.Spec.Parallelism = new(int32)
+ *obj.Spec.Parallelism = 1
+ }
+}
+
+func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) {
+ if obj.Spec.MinReplicas == nil {
+ minReplicas := int32(1)
+ obj.Spec.MinReplicas = &minReplicas
+ }
+ if obj.Spec.CPUUtilization == nil {
+ obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80}
+ }
+}
+
+func SetDefaults_ReplicaSet(obj *ReplicaSet) {
+ labels := obj.Spec.Template.Labels
+
+ // TODO: support templates defined elsewhere when we support them in the API
+ if labels != nil {
+ if obj.Spec.Selector == nil {
+ obj.Spec.Selector = &LabelSelector{
+ MatchLabels: labels,
+ }
+ }
+ if len(obj.Labels) == 0 {
+ obj.Labels = labels
+ }
+ }
+ if obj.Spec.Replicas == nil {
+ obj.Spec.Replicas = new(int32)
+ *obj.Spec.Replicas = 1
+ }
+}
+
+func SetDefaults_NetworkPolicy(obj *NetworkPolicy) {
+ // Default any undefined Protocol fields to TCP.
+ for _, i := range obj.Spec.Ingress {
+ // TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it.
+ // Index into the slice so the default lands on the stored element,
+ // not on a loop-variable copy that is discarded each iteration.
+ for j := range i.Ports {
+ p := &i.Ports[j]
+ if p.Protocol == nil {
+ proto := v1.ProtocolTCP
+ p.Protocol = &proto
+ }
+ }
+ }
+}
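The SetDefaults_* functions above all default by allocation: a nil pointer or empty field left unset by the client is replaced with a freshly allocated value, so downstream code can dereference without nil checks. A rough, self-contained sketch of the Deployment replica/strategy defaulting, using stand-in types rather than the real v1beta1 structs:

package main

import "fmt"

// Stand-in types; the real ones live in the vendored v1beta1 package.
type RollingUpdate struct{ MaxUnavailable, MaxSurge *int }

type DeploymentSpec struct {
    Replicas      *int32
    StrategyType  string
    RollingUpdate *RollingUpdate
}

func setDefaults(spec *DeploymentSpec) {
    if spec.Replicas == nil { // unset replicas default to 1
        spec.Replicas = new(int32)
        *spec.Replicas = 1
    }
    if spec.StrategyType == "" { // unset strategy defaults to RollingUpdate
        spec.StrategyType = "RollingUpdate"
    }
    if spec.StrategyType == "RollingUpdate" {
        if spec.RollingUpdate == nil {
            spec.RollingUpdate = &RollingUpdate{}
        }
        if spec.RollingUpdate.MaxUnavailable == nil {
            one := 1
            spec.RollingUpdate.MaxUnavailable = &one
        }
        if spec.RollingUpdate.MaxSurge == nil {
            one := 1
            spec.RollingUpdate.MaxSurge = &one
        }
    }
}

func main() {
    spec := DeploymentSpec{} // everything unset
    setDefaults(&spec)
    fmt.Println(*spec.Replicas, spec.StrategyType,
        *spec.RollingUpdate.MaxUnavailable, *spec.RollingUpdate.MaxSurge)
    // Output: 1 RollingUpdate 1 1
}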
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go
new file mode 100644
index 0000000..dc87900
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch
+
+package v1beta1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go
new file mode 100644
index 0000000..b585533
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go
@@ -0,0 +1,13005 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1beta1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto
+
+ It has these top-level messages:
+ APIVersion
+ CPUTargetUtilization
+ CustomMetricCurrentStatus
+ CustomMetricCurrentStatusList
+ CustomMetricTarget
+ CustomMetricTargetList
+ DaemonSet
+ DaemonSetList
+ DaemonSetSpec
+ DaemonSetStatus
+ Deployment
+ DeploymentList
+ DeploymentRollback
+ DeploymentSpec
+ DeploymentStatus
+ DeploymentStrategy
+ ExportOptions
+ FSGroupStrategyOptions
+ HTTPIngressPath
+ HTTPIngressRuleValue
+ HorizontalPodAutoscaler
+ HorizontalPodAutoscalerList
+ HorizontalPodAutoscalerSpec
+ HorizontalPodAutoscalerStatus
+ HostPortRange
+ IDRange
+ Ingress
+ IngressBackend
+ IngressList
+ IngressRule
+ IngressRuleValue
+ IngressSpec
+ IngressStatus
+ IngressTLS
+ Job
+ JobCondition
+ JobList
+ JobSpec
+ JobStatus
+ LabelSelector
+ LabelSelectorRequirement
+ ListOptions
+ NetworkPolicy
+ NetworkPolicyIngressRule
+ NetworkPolicyList
+ NetworkPolicyPeer
+ NetworkPolicyPort
+ NetworkPolicySpec
+ PodSecurityPolicy
+ PodSecurityPolicyList
+ PodSecurityPolicySpec
+ ReplicaSet
+ ReplicaSetList
+ ReplicaSetSpec
+ ReplicaSetStatus
+ ReplicationControllerDummy
+ RollbackConfig
+ RollingUpdateDeployment
+ RunAsUserStrategyOptions
+ SELinuxStrategyOptions
+ Scale
+ ScaleSpec
+ ScaleStatus
+ SubresourceReference
+ SupplementalGroupsStrategyOptions
+ ThirdPartyResource
+ ThirdPartyResourceData
+ ThirdPartyResourceDataList
+ ThirdPartyResourceList
+*/
+package v1beta1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1"
+
+import k8s_io_kubernetes_pkg_util_intstr "k8s.io/kubernetes/pkg/util/intstr"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *APIVersion) Reset() { *m = APIVersion{} }
+func (m *APIVersion) String() string { return proto.CompactTextString(m) }
+func (*APIVersion) ProtoMessage() {}
+
+func (m *CPUTargetUtilization) Reset() { *m = CPUTargetUtilization{} }
+func (m *CPUTargetUtilization) String() string { return proto.CompactTextString(m) }
+func (*CPUTargetUtilization) ProtoMessage() {}
+
+func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} }
+func (m *CustomMetricCurrentStatus) String() string { return proto.CompactTextString(m) }
+func (*CustomMetricCurrentStatus) ProtoMessage() {}
+
+func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} }
+func (m *CustomMetricCurrentStatusList) String() string { return proto.CompactTextString(m) }
+func (*CustomMetricCurrentStatusList) ProtoMessage() {}
+
+func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} }
+func (m *CustomMetricTarget) String() string { return proto.CompactTextString(m) }
+func (*CustomMetricTarget) ProtoMessage() {}
+
+func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} }
+func (m *CustomMetricTargetList) String() string { return proto.CompactTextString(m) }
+func (*CustomMetricTargetList) ProtoMessage() {}
+
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
+func (m *DaemonSet) String() string { return proto.CompactTextString(m) }
+func (*DaemonSet) ProtoMessage() {}
+
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
+func (m *DaemonSetList) String() string { return proto.CompactTextString(m) }
+func (*DaemonSetList) ProtoMessage() {}
+
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
+func (m *DaemonSetSpec) String() string { return proto.CompactTextString(m) }
+func (*DaemonSetSpec) ProtoMessage() {}
+
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
+func (m *DaemonSetStatus) String() string { return proto.CompactTextString(m) }
+func (*DaemonSetStatus) ProtoMessage() {}
+
+func (m *Deployment) Reset() { *m = Deployment{} }
+func (m *Deployment) String() string { return proto.CompactTextString(m) }
+func (*Deployment) ProtoMessage() {}
+
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
+func (m *DeploymentList) String() string { return proto.CompactTextString(m) }
+func (*DeploymentList) ProtoMessage() {}
+
+func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} }
+func (m *DeploymentRollback) String() string { return proto.CompactTextString(m) }
+func (*DeploymentRollback) ProtoMessage() {}
+
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
+func (m *DeploymentSpec) String() string { return proto.CompactTextString(m) }
+func (*DeploymentSpec) ProtoMessage() {}
+
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
+func (m *DeploymentStatus) String() string { return proto.CompactTextString(m) }
+func (*DeploymentStatus) ProtoMessage() {}
+
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
+func (m *DeploymentStrategy) String() string { return proto.CompactTextString(m) }
+func (*DeploymentStrategy) ProtoMessage() {}
+
+func (m *ExportOptions) Reset() { *m = ExportOptions{} }
+func (m *ExportOptions) String() string { return proto.CompactTextString(m) }
+func (*ExportOptions) ProtoMessage() {}
+
+func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} }
+func (m *FSGroupStrategyOptions) String() string { return proto.CompactTextString(m) }
+func (*FSGroupStrategyOptions) ProtoMessage() {}
+
+func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
+func (m *HTTPIngressPath) String() string { return proto.CompactTextString(m) }
+func (*HTTPIngressPath) ProtoMessage() {}
+
+func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
+func (m *HTTPIngressRuleValue) String() string { return proto.CompactTextString(m) }
+func (*HTTPIngressRuleValue) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
+func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscaler) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
+func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscalerList) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
+func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
+
+func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
+func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
+
+func (m *HostPortRange) Reset() { *m = HostPortRange{} }
+func (m *HostPortRange) String() string { return proto.CompactTextString(m) }
+func (*HostPortRange) ProtoMessage() {}
+
+func (m *IDRange) Reset() { *m = IDRange{} }
+func (m *IDRange) String() string { return proto.CompactTextString(m) }
+func (*IDRange) ProtoMessage() {}
+
+func (m *Ingress) Reset() { *m = Ingress{} }
+func (m *Ingress) String() string { return proto.CompactTextString(m) }
+func (*Ingress) ProtoMessage() {}
+
+func (m *IngressBackend) Reset() { *m = IngressBackend{} }
+func (m *IngressBackend) String() string { return proto.CompactTextString(m) }
+func (*IngressBackend) ProtoMessage() {}
+
+func (m *IngressList) Reset() { *m = IngressList{} }
+func (m *IngressList) String() string { return proto.CompactTextString(m) }
+func (*IngressList) ProtoMessage() {}
+
+func (m *IngressRule) Reset() { *m = IngressRule{} }
+func (m *IngressRule) String() string { return proto.CompactTextString(m) }
+func (*IngressRule) ProtoMessage() {}
+
+func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
+func (m *IngressRuleValue) String() string { return proto.CompactTextString(m) }
+func (*IngressRuleValue) ProtoMessage() {}
+
+func (m *IngressSpec) Reset() { *m = IngressSpec{} }
+func (m *IngressSpec) String() string { return proto.CompactTextString(m) }
+func (*IngressSpec) ProtoMessage() {}
+
+func (m *IngressStatus) Reset() { *m = IngressStatus{} }
+func (m *IngressStatus) String() string { return proto.CompactTextString(m) }
+func (*IngressStatus) ProtoMessage() {}
+
+func (m *IngressTLS) Reset() { *m = IngressTLS{} }
+func (m *IngressTLS) String() string { return proto.CompactTextString(m) }
+func (*IngressTLS) ProtoMessage() {}
+
+func (m *Job) Reset() { *m = Job{} }
+func (m *Job) String() string { return proto.CompactTextString(m) }
+func (*Job) ProtoMessage() {}
+
+func (m *JobCondition) Reset() { *m = JobCondition{} }
+func (m *JobCondition) String() string { return proto.CompactTextString(m) }
+func (*JobCondition) ProtoMessage() {}
+
+func (m *JobList) Reset() { *m = JobList{} }
+func (m *JobList) String() string { return proto.CompactTextString(m) }
+func (*JobList) ProtoMessage() {}
+
+func (m *JobSpec) Reset() { *m = JobSpec{} }
+func (m *JobSpec) String() string { return proto.CompactTextString(m) }
+func (*JobSpec) ProtoMessage() {}
+
+func (m *JobStatus) Reset() { *m = JobStatus{} }
+func (m *JobStatus) String() string { return proto.CompactTextString(m) }
+func (*JobStatus) ProtoMessage() {}
+
+func (m *LabelSelector) Reset() { *m = LabelSelector{} }
+func (m *LabelSelector) String() string { return proto.CompactTextString(m) }
+func (*LabelSelector) ProtoMessage() {}
+
+func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
+func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) }
+func (*LabelSelectorRequirement) ProtoMessage() {}
+
+func (m *ListOptions) Reset() { *m = ListOptions{} }
+func (m *ListOptions) String() string { return proto.CompactTextString(m) }
+func (*ListOptions) ProtoMessage() {}
+
+func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
+func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicy) ProtoMessage() {}
+
+func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
+func (m *NetworkPolicyIngressRule) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicyIngressRule) ProtoMessage() {}
+
+func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
+func (m *NetworkPolicyList) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicyList) ProtoMessage() {}
+
+func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
+func (m *NetworkPolicyPeer) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicyPeer) ProtoMessage() {}
+
+func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
+func (m *NetworkPolicyPort) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicyPort) ProtoMessage() {}
+
+func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
+func (m *NetworkPolicySpec) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicySpec) ProtoMessage() {}
+
+func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} }
+func (m *PodSecurityPolicy) String() string { return proto.CompactTextString(m) }
+func (*PodSecurityPolicy) ProtoMessage() {}
+
+func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} }
+func (m *PodSecurityPolicyList) String() string { return proto.CompactTextString(m) }
+func (*PodSecurityPolicyList) ProtoMessage() {}
+
+func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} }
+func (m *PodSecurityPolicySpec) String() string { return proto.CompactTextString(m) }
+func (*PodSecurityPolicySpec) ProtoMessage() {}
+
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
+func (m *ReplicaSet) String() string { return proto.CompactTextString(m) }
+func (*ReplicaSet) ProtoMessage() {}
+
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
+func (m *ReplicaSetList) String() string { return proto.CompactTextString(m) }
+func (*ReplicaSetList) ProtoMessage() {}
+
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
+func (m *ReplicaSetSpec) String() string { return proto.CompactTextString(m) }
+func (*ReplicaSetSpec) ProtoMessage() {}
+
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
+func (m *ReplicaSetStatus) String() string { return proto.CompactTextString(m) }
+func (*ReplicaSetStatus) ProtoMessage() {}
+
+func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} }
+func (m *ReplicationControllerDummy) String() string { return proto.CompactTextString(m) }
+func (*ReplicationControllerDummy) ProtoMessage() {}
+
+func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
+func (m *RollbackConfig) String() string { return proto.CompactTextString(m) }
+func (*RollbackConfig) ProtoMessage() {}
+
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
+func (m *RollingUpdateDeployment) String() string { return proto.CompactTextString(m) }
+func (*RollingUpdateDeployment) ProtoMessage() {}
+
+func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} }
+func (m *RunAsUserStrategyOptions) String() string { return proto.CompactTextString(m) }
+func (*RunAsUserStrategyOptions) ProtoMessage() {}
+
+func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} }
+func (m *SELinuxStrategyOptions) String() string { return proto.CompactTextString(m) }
+func (*SELinuxStrategyOptions) ProtoMessage() {}
+
+func (m *Scale) Reset() { *m = Scale{} }
+func (m *Scale) String() string { return proto.CompactTextString(m) }
+func (*Scale) ProtoMessage() {}
+
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
+func (m *ScaleSpec) String() string { return proto.CompactTextString(m) }
+func (*ScaleSpec) ProtoMessage() {}
+
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
+func (m *ScaleStatus) String() string { return proto.CompactTextString(m) }
+func (*ScaleStatus) ProtoMessage() {}
+
+func (m *SubresourceReference) Reset() { *m = SubresourceReference{} }
+func (m *SubresourceReference) String() string { return proto.CompactTextString(m) }
+func (*SubresourceReference) ProtoMessage() {}
+
+func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} }
+func (m *SupplementalGroupsStrategyOptions) String() string { return proto.CompactTextString(m) }
+func (*SupplementalGroupsStrategyOptions) ProtoMessage() {}
+
+func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} }
+func (m *ThirdPartyResource) String() string { return proto.CompactTextString(m) }
+func (*ThirdPartyResource) ProtoMessage() {}
+
+func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} }
+func (m *ThirdPartyResourceData) String() string { return proto.CompactTextString(m) }
+func (*ThirdPartyResourceData) ProtoMessage() {}
+
+func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} }
+func (m *ThirdPartyResourceDataList) String() string { return proto.CompactTextString(m) }
+func (*ThirdPartyResourceDataList) ProtoMessage() {}
+
+func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} }
+func (m *ThirdPartyResourceList) String() string { return proto.CompactTextString(m) }
+func (*ThirdPartyResourceList) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*APIVersion)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.APIVersion")
+ proto.RegisterType((*CPUTargetUtilization)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CPUTargetUtilization")
+ proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus")
+ proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList")
+ proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTarget")
+ proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTargetList")
+ proto.RegisterType((*DaemonSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet")
+ proto.RegisterType((*DaemonSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList")
+ proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec")
+ proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus")
+ proto.RegisterType((*Deployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Deployment")
+ proto.RegisterType((*DeploymentList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList")
+ proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback")
+ proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec")
+ proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus")
+ proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy")
+ proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ExportOptions")
+ proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions")
+ proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath")
+ proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue")
+ proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscaler")
+ proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerList")
+ proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerSpec")
+ proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerStatus")
+ proto.RegisterType((*HostPortRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange")
+ proto.RegisterType((*IDRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IDRange")
+ proto.RegisterType((*Ingress)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Ingress")
+ proto.RegisterType((*IngressBackend)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressBackend")
+ proto.RegisterType((*IngressList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressList")
+ proto.RegisterType((*IngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressRule")
+ proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressRuleValue")
+ proto.RegisterType((*IngressSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec")
+ proto.RegisterType((*IngressStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus")
+ proto.RegisterType((*IngressTLS)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS")
+ proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Job")
+ proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobCondition")
+ proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobList")
+ proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobSpec")
+ proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobStatus")
+ proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.LabelSelector")
+ proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.LabelSelectorRequirement")
+ proto.RegisterType((*ListOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ListOptions")
+ proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy")
+ proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule")
+ proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList")
+ proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPeer")
+ proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPort")
+ proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicySpec")
+ proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicy")
+ proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList")
+ proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec")
+ proto.RegisterType((*ReplicaSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet")
+ proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList")
+ proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetSpec")
+ proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus")
+ proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicationControllerDummy")
+ proto.RegisterType((*RollbackConfig)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig")
+ proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment")
+ proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions")
+ proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions")
+ proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Scale")
+ proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec")
+ proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus")
+ proto.RegisterType((*SubresourceReference)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SubresourceReference")
+ proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions")
+ proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResource")
+ proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceData")
+ proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceDataList")
+ proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceList")
+}
+func (m *APIVersion) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *APIVersion) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ return i, nil
+}
+
+func (m *CPUTargetUtilization) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CPUTargetUtilization) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TargetPercentage))
+ return i, nil
+}
+
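The byte constants written before each field in these marshalers (0xa, 0x8, 0x12, 0x1a, ...) are protobuf field keys: key = (field_number << 3) | wire_type, with wire type 0 for varints and wire type 2 for length-delimited values (strings, bytes, nested messages), and encodeVarintGenerated emits the value in the usual base-128 varint form. A small standalone sketch of that encoding; fieldKey and appendVarint are illustrative helpers, not part of the generated file:

package main

import "fmt"

// fieldKey builds a protobuf field key, (fieldNumber << 3) | wireType,
// itself written as a varint (a single byte for small field numbers).
func fieldKey(fieldNumber, wireType uint64) []byte {
    return appendVarint(nil, fieldNumber<<3|wireType)
}

// appendVarint is the standard base-128 varint encoding,
// the same scheme encodeVarintGenerated uses.
func appendVarint(b []byte, v uint64) []byte {
    for v >= 0x80 {
        b = append(b, byte(v)|0x80)
        v >>= 7
    }
    return append(b, byte(v))
}

func main() {
    fmt.Printf("%#x\n", fieldKey(1, 2)) // 0x0a: field 1, length-delimited (e.g. Name)
    fmt.Printf("%#x\n", fieldKey(1, 0)) // 0x08: field 1, varint (e.g. TargetPercentage)
    fmt.Printf("%#x\n", fieldKey(2, 2)) // 0x12: field 2, length-delimited
    fmt.Printf("%#x\n", fieldKey(3, 2)) // 0x1a: field 3, length-delimited
}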
+func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size()))
+ n1, err := m.CurrentValue.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ return i, nil
+}
+
+func (m *CustomMetricCurrentStatusList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CustomMetricTarget) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size()))
+ n2, err := m.TargetValue.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ return i, nil
+}
+
+func (m *CustomMetricTargetList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DaemonSet) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DaemonSet) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n3, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n4, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n5, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ return i, nil
+}
+
+func (m *DaemonSetList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DaemonSetList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n6, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DaemonSetSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Selector != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n7, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n8, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ return i, nil
+}
+
+func (m *DaemonSetStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CurrentNumberScheduled))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NumberMisscheduled))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled))
+ return i, nil
+}
+
+func (m *Deployment) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Deployment) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n9, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n10, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n11, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ return i, nil
+}
+
+func (m *DeploymentList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DeploymentList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n12, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DeploymentRollback) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ if len(m.UpdatedAnnotations) > 0 {
+ for k := range m.UpdatedAnnotations {
+ data[i] = 0x12
+ i++
+ v := m.UpdatedAnnotations[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size()))
+ n13, err := m.RollbackTo.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ return i, nil
+}
+
+func (m *DeploymentSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n14, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n15, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size()))
+ n16, err := m.Strategy.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ data[i] = 0x30
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit))
+ }
+ data[i] = 0x38
+ i++
+ if m.Paused {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if m.RollbackTo != nil {
+ data[i] = 0x42
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size()))
+ n17, err := m.RollbackTo.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ return i, nil
+}
+
+func (m *DeploymentStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas))
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas))
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas))
+ return i, nil
+}
+
+func (m *DeploymentStrategy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ if m.RollingUpdate != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size()))
+ n18, err := m.RollingUpdate.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ return i, nil
+}
+
+func (m *ExportOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
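+// Export and Exact are bool fields: each is written as a varint tag (0x8 for
+// field 1, 0x10 for field 2) followed by a single 0 or 1 byte.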
+func (m *ExportOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.Export {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x10
+ i++
+ if m.Exact {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *FSGroupStrategyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Rule)))
+ i += copy(data[i:], m.Rule)
+ if len(m.Ranges) > 0 {
+ for _, msg := range m.Ranges {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *HTTPIngressPath) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Path)))
+ i += copy(data[i:], m.Path)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Backend.Size()))
+ n19, err := m.Backend.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ return i, nil
+}
+
+func (m *HTTPIngressRuleValue) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, msg := range m.Paths {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n20, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n21, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n22, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n23, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ScaleRef.Size()))
+ n24, err := m.ScaleRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ if m.MinReplicas != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas))
+ }
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas))
+ if m.CPUUtilization != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CPUUtilization.Size()))
+ n25, err := m.CPUUtilization.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n25
+ }
+ return i, nil
+}
+
+func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ObservedGeneration != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration))
+ }
+ if m.LastScaleTime != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size()))
+ n26, err := m.LastScaleTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ }
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas))
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas))
+ if m.CurrentCPUUtilizationPercentage != nil {
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage))
+ }
+ return i, nil
+}
+
+func (m *HostPortRange) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *HostPortRange) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Min))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Max))
+ return i, nil
+}
+
+func (m *IDRange) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IDRange) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Min))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Max))
+ return i, nil
+}
+
+func (m *Ingress) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Ingress) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n27, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n28, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n28
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n29, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n29
+ return i, nil
+}
+
+func (m *IngressBackend) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IngressBackend) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName)))
+ i += copy(data[i:], m.ServiceName)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size()))
+ n30, err := m.ServicePort.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n30
+ return i, nil
+}
+
+func (m *IngressList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IngressList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n31, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n31
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *IngressRule) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IngressRule) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Host)))
+ i += copy(data[i:], m.Host)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size()))
+ n32, err := m.IngressRuleValue.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n32
+ return i, nil
+}
+
+func (m *IngressRuleValue) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.HTTP != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size()))
+ n33, err := m.HTTP.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n33
+ }
+ return i, nil
+}
+
+func (m *IngressSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IngressSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Backend != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Backend.Size()))
+ n34, err := m.Backend.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n34
+ }
+ if len(m.TLS) > 0 {
+ for _, msg := range m.TLS {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Rules) > 0 {
+ for _, msg := range m.Rules {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *IngressStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IngressStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size()))
+ n35, err := m.LoadBalancer.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n35
+ return i, nil
+}
+
+func (m *IngressTLS) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
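+// The inner loop below inlines varint encoding of each host string's length
+// (the same base-128 scheme used by encodeVarintGenerated) before copying the
+// string bytes; Hosts is repeated string field 1 and SecretName is string
+// field 2.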
+func (m *IngressTLS) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hosts) > 0 {
+ for _, s := range m.Hosts {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.SecretName)))
+ i += copy(data[i:], m.SecretName)
+ return i, nil
+}
+
+func (m *Job) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Job) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n36, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n36
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n37, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n37
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n38, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n38
+ return i, nil
+}
+
+func (m *JobCondition) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobCondition) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Status)))
+ i += copy(data[i:], m.Status)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size()))
+ n39, err := m.LastProbeTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n39
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size()))
+ n40, err := m.LastTransitionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n40
+ data[i] = 0x2a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Reason)))
+ i += copy(data[i:], m.Reason)
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Message)))
+ i += copy(data[i:], m.Message)
+ return i, nil
+}
+
+func (m *JobList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n41, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n41
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *JobSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Parallelism != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Parallelism))
+ }
+ if m.Completions != nil {
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Completions))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds))
+ }
+ if m.Selector != nil {
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n42, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n42
+ }
+ if m.AutoSelector != nil {
+ data[i] = 0x28
+ i++
+ if *m.AutoSelector {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ }
+ data[i] = 0x32
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n43, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n43
+ return i, nil
+}
+
+func (m *JobStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *JobStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, msg := range m.Conditions {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.StartTime != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size()))
+ n44, err := m.StartTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n44
+ }
+ if m.CompletionTime != nil {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size()))
+ n45, err := m.CompletionTime.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n45
+ }
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Active))
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Succeeded))
+ data[i] = 0x30
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Failed))
+ return i, nil
+}
+
+func (m *LabelSelector) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelector) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k := range m.MatchLabels {
+ data[i] = 0xa
+ i++
+ v := m.MatchLabels[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, msg := range m.MatchExpressions {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Key)))
+ i += copy(data[i:], m.Key)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Operator)))
+ i += copy(data[i:], m.Operator)
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *ListOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ListOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector)))
+ i += copy(data[i:], m.LabelSelector)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector)))
+ i += copy(data[i:], m.FieldSelector)
+ data[i] = 0x18
+ i++
+ if m.Watch {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion)))
+ i += copy(data[i:], m.ResourceVersion)
+ if m.TimeoutSeconds != nil {
+ data[i] = 0x28
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds))
+ }
+ return i, nil
+}
+
+func (m *NetworkPolicy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n46, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n46
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n47, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n47
+ return i, nil
+}
+
+func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for _, msg := range m.Ports {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.From) > 0 {
+ for _, msg := range m.From {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NetworkPolicyList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n48, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n48
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.PodSelector != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size()))
+ n49, err := m.PodSelector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n49
+ }
+ if m.NamespaceSelector != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size()))
+ n50, err := m.NamespaceSelector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n50
+ }
+ return i, nil
+}
+
+func (m *NetworkPolicyPort) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Protocol != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol)))
+ i += copy(data[i:], *m.Protocol)
+ }
+ if m.Port != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Port.Size()))
+ n51, err := m.Port.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n51
+ }
+ return i, nil
+}
+
+func (m *NetworkPolicySpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size()))
+ n52, err := m.PodSelector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n52
+ if len(m.Ingress) > 0 {
+ for _, msg := range m.Ingress {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodSecurityPolicy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n53, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n53
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n54, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n54
+ return i, nil
+}
+
+func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n55, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n55
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.Privileged {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if len(m.DefaultAddCapabilities) > 0 {
+ for _, s := range m.DefaultAddCapabilities {
+ data[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.RequiredDropCapabilities) > 0 {
+ for _, s := range m.RequiredDropCapabilities {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.AllowedCapabilities) > 0 {
+ for _, s := range m.AllowedCapabilities {
+ data[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Volumes) > 0 {
+ for _, s := range m.Volumes {
+ data[i] = 0x2a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x30
+ i++
+ if m.HostNetwork {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ if len(m.HostPorts) > 0 {
+ for _, msg := range m.HostPorts {
+ data[i] = 0x3a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x40
+ i++
+ if m.HostPID {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x48
+ i++
+ if m.HostIPC {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x52
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size()))
+ n56, err := m.SELinux.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n56
+ data[i] = 0x5a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size()))
+ n57, err := m.RunAsUser.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n57
+ data[i] = 0x62
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size()))
+ n58, err := m.SupplementalGroups.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n58
+ data[i] = 0x6a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size()))
+ n59, err := m.FSGroup.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n59
+ data[i] = 0x70
+ i++
+ if m.ReadOnlyRootFilesystem {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ return i, nil
+}
+
+func (m *ReplicaSet) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicaSet) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n60, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n60
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n61, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n61
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n62, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n62
+ return i, nil
+}
+
+func (m *ReplicaSetList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n63, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n63
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ReplicaSetSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n64, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n64
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Template.Size()))
+ n65, err := m.Template.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n65
+ return i, nil
+}
+
+func (m *ReplicaSetStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration))
+ return i, nil
+}
+
+func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *RollbackConfig) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RollbackConfig) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Revision))
+ return i, nil
+}
+
+func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size()))
+ n66, err := m.MaxUnavailable.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n66
+ }
+ if m.MaxSurge != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size()))
+ n67, err := m.MaxSurge.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n67
+ }
+ return i, nil
+}
+
+func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Rule)))
+ i += copy(data[i:], m.Rule)
+ if len(m.Ranges) > 0 {
+ for _, msg := range m.Ranges {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Rule)))
+ i += copy(data[i:], m.Rule)
+ if m.SELinuxOptions != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size()))
+ n68, err := m.SELinuxOptions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n68
+ }
+ return i, nil
+}
+
+func (m *Scale) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Scale) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n69, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n69
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n70, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n70
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n71, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n71
+ return i, nil
+}
+
+func (m *ScaleSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScaleSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ return i, nil
+}
+
+func (m *ScaleStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ScaleStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Replicas))
+ if len(m.Selector) > 0 {
+ for k := range m.Selector {
+ data[i] = 0x12
+ i++
+ v := m.Selector[k]
+ mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ i = encodeVarintGenerated(data, i, uint64(mapSize))
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(k)))
+ i += copy(data[i:], k)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(v)))
+ i += copy(data[i:], v)
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector)))
+ i += copy(data[i:], m.TargetSelector)
+ return i, nil
+}
+
+func (m *SubresourceReference) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SubresourceReference) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Subresource)))
+ i += copy(data[i:], m.Subresource)
+ return i, nil
+}
+
+func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Rule)))
+ i += copy(data[i:], m.Rule)
+ if len(m.Ranges) > 0 {
+ for _, msg := range m.Ranges {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ThirdPartyResource) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n72, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n72
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Description)))
+ i += copy(data[i:], m.Description)
+ if len(m.Versions) > 0 {
+ for _, msg := range m.Versions {
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n73, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n73
+ if m.Data != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Data)))
+ i += copy(data[i:], m.Data)
+ }
+ return i, nil
+}
+
+func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n74, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n74
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n75, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n75
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
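+// encodeVarintGenerated writes v at data[offset:] as a base-128 varint:
+// seven payload bits per byte, least-significant group first, with the high
+// bit set on every byte except the last. For example, 300 encodes as the two
+// bytes 0xac 0x02. It returns the offset just past the last byte written.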
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
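+
+// The Size methods below compute the exact number of bytes MarshalTo will
+// emit, so Marshal can allocate the buffer up front. A length-delimited field
+// with payload length l costs 1 + l + sovGenerated(uint64(l)): one tag byte,
+// the payload, and the varint length prefix in front of it. Bool fields cost
+// a flat 2 bytes (n += 2), and plain integer fields cost one tag byte plus
+// the varint size of the value.
+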
+func (m *APIVersion) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CPUTargetUtilization) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.TargetPercentage))
+ return n
+}
+
+func (m *CustomMetricCurrentStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.CurrentValue.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CustomMetricCurrentStatusList) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CustomMetricTarget) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.TargetValue.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CustomMetricTargetList) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSet) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DaemonSetSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DaemonSetStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled))
+ n += 1 + sovGenerated(uint64(m.NumberMisscheduled))
+ n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled))
+ return n
+}
+
+func (m *Deployment) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentRollback) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.UpdatedAnnotations) > 0 {
+ for k, v := range m.UpdatedAnnotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = m.RollbackTo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Strategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ n += 2
+ if m.RollbackTo != nil {
+ l = m.RollbackTo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeploymentStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+ return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RollingUpdate != nil {
+ l = m.RollingUpdate.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ExportOptions) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ return n
+}
+
+func (m *FSGroupStrategyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Rule)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ranges) > 0 {
+ for _, e := range m.Ranges {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HTTPIngressPath) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Backend.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *HTTPIngressRuleValue) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, e := range m.Paths {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscaler) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *HorizontalPodAutoscalerList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscalerSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ScaleRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.MinReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.MinReplicas))
+ }
+ n += 1 + sovGenerated(uint64(m.MaxReplicas))
+ if m.CPUUtilization != nil {
+ l = m.CPUUtilization.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *HorizontalPodAutoscalerStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.ObservedGeneration != nil {
+ n += 1 + sovGenerated(uint64(*m.ObservedGeneration))
+ }
+ if m.LastScaleTime != nil {
+ l = m.LastScaleTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.CurrentReplicas))
+ n += 1 + sovGenerated(uint64(m.DesiredReplicas))
+ if m.CurrentCPUUtilizationPercentage != nil {
+ n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage))
+ }
+ return n
+}
+
+func (m *HostPortRange) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Min))
+ n += 1 + sovGenerated(uint64(m.Max))
+ return n
+}
+
+func (m *IDRange) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Min))
+ n += 1 + sovGenerated(uint64(m.Max))
+ return n
+}
+
+func (m *Ingress) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressBackend) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ServiceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ServicePort.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IngressRule) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Host)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.IngressRuleValue.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressRuleValue) Size() (n int) {
+ var l int
+ _ = l
+ if m.HTTP != nil {
+ l = m.HTTP.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Backend != nil {
+ l = m.Backend.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.TLS) > 0 {
+ for _, e := range m.TLS {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IngressStatus) Size() (n int) {
+ var l int
+ _ = l
+ l = m.LoadBalancer.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressTLS) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Hosts) > 0 {
+ for _, s := range m.Hosts {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Job) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobCondition) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastProbeTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *JobSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Parallelism != nil {
+ n += 1 + sovGenerated(uint64(*m.Parallelism))
+ }
+ if m.Completions != nil {
+ n += 1 + sovGenerated(uint64(*m.Completions))
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.AutoSelector != nil {
+ n += 2
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JobStatus) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.StartTime != nil {
+ l = m.StartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CompletionTime != nil {
+ l = m.CompletionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Active))
+ n += 1 + sovGenerated(uint64(m.Succeeded))
+ n += 1 + sovGenerated(uint64(m.Failed))
+ return n
+}
+
+func (m *LabelSelector) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.MatchLabels) > 0 {
+ for k, v := range m.MatchLabels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.MatchExpressions) > 0 {
+ for _, e := range m.MatchExpressions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelSelectorRequirement) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Values) > 0 {
+ for _, s := range m.Values {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ListOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.LabelSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FieldSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ return n
+}
+
+func (m *NetworkPolicy) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NetworkPolicyIngressRule) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.From) > 0 {
+ for _, e := range m.From {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetworkPolicyList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetworkPolicyPeer) Size() (n int) {
+ var l int
+ _ = l
+ if m.PodSelector != nil {
+ l = m.PodSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkPolicyPort) Size() (n int) {
+ var l int
+ _ = l
+ if m.Protocol != nil {
+ l = len(*m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Port != nil {
+ l = m.Port.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkPolicySpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.PodSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ingress) > 0 {
+ for _, e := range m.Ingress {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodSecurityPolicy) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodSecurityPolicyList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodSecurityPolicySpec) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ if len(m.DefaultAddCapabilities) > 0 {
+ for _, s := range m.DefaultAddCapabilities {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.RequiredDropCapabilities) > 0 {
+ for _, s := range m.RequiredDropCapabilities {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.AllowedCapabilities) > 0 {
+ for _, s := range m.AllowedCapabilities {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Volumes) > 0 {
+ for _, s := range m.Volumes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 2
+ if len(m.HostPorts) > 0 {
+ for _, e := range m.HostPorts {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 2
+ n += 2
+ l = m.SELinux.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.RunAsUser.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.SupplementalGroups.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.FSGroup.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *ReplicaSet) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ReplicaSetSpec) Size() (n int) {
+ var l int
+ _ = l
+ if m.Replicas != nil {
+ n += 1 + sovGenerated(uint64(*m.Replicas))
+ }
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ReplicaSetStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ return n
+}
+
+func (m *ReplicationControllerDummy) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *RollbackConfig) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Revision))
+ return n
+}
+
+func (m *RollingUpdateDeployment) Size() (n int) {
+ var l int
+ _ = l
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RunAsUserStrategyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Rule)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ranges) > 0 {
+ for _, e := range m.Ranges {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SELinuxStrategyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Rule)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SELinuxOptions != nil {
+ l = m.SELinuxOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *Scale) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ScaleSpec) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ return n
+}
+
+func (m *ScaleStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ if len(m.Selector) > 0 {
+ for k, v := range m.Selector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.TargetSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SubresourceReference) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Subresource)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SupplementalGroupsStrategyOptions) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Rule)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ranges) > 0 {
+ for _, e := range m.Ranges {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ThirdPartyResource) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ThirdPartyResourceData) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ThirdPartyResourceDataList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ThirdPartyResourceList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *APIVersion) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CPUTargetUtilization) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CPUTargetUtilization: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CPUTargetUtilization: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetPercentage", wireType)
+ }
+ m.TargetPercentage = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.TargetPercentage |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CustomMetricCurrentStatus{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CustomMetricTarget) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CustomMetricTargetList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CustomMetricTarget{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSet) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DaemonSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DaemonSetStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType)
+ }
+ m.CurrentNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType)
+ }
+ m.NumberMisscheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.NumberMisscheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType)
+ }
+ m.DesiredNumberScheduled = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Deployment) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Deployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Deployment{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentRollback) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.UpdatedAnnotations == nil {
+ m.UpdatedAnnotations = make(map[string]string)
+ }
+ m.UpdatedAnnotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Paused = bool(v != 0)
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollbackTo == nil {
+ m.RollbackTo = &RollbackConfig{}
+ }
+ if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType)
+ }
+ m.UnavailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.UnavailableReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStrategy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentStrategyType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingUpdate == nil {
+ m.RollingUpdate = &RollingUpdateDeployment{}
+ }
+ if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExportOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Export = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Exact = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rule = FSGroupStrategyType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ranges = append(m.Ranges, IDRange{})
+ if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HTTPIngressPath) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Paths = append(m.Paths, HTTPIngressPath{})
+ if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, HorizontalPodAutoscaler{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScaleRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ScaleRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MinReplicas = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType)
+ }
+ m.MaxReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.MaxReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CPUUtilization", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CPUUtilization == nil {
+ m.CPUUtilization = &CPUTargetUtilization{}
+ }
+ if err := m.CPUUtilization.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ObservedGeneration = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastScaleTime == nil {
+ m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType)
+ }
+ m.CurrentReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.CurrentReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType)
+ }
+ m.DesiredReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.DesiredReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CurrentCPUUtilizationPercentage = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HostPortRange) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+ }
+ m.Min = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Min |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+ }
+ m.Max = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Max |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IDRange) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IDRange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+ }
+ m.Min = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Min |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+ }
+ m.Max = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Max |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Ingress) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressBackend) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Ingress{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressRule) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Host = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressRuleValue) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.HTTP == nil {
+ m.HTTP = &HTTPIngressRuleValue{}
+ }
+ if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Backend == nil {
+ m.Backend = &IngressBackend{}
+ }
+ if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TLS = append(m.TLS, IngressTLS{})
+ if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, IngressRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressTLS) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretName = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Job) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Job: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobCondition) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = JobConditionType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Job{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Parallelism = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Completions = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveDeadlineSeconds = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AutoSelector", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AutoSelector = &b
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JobStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JobStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, JobCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StartTime == nil {
+ m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CompletionTime == nil {
+ m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+ }
+ if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
+ }
+ m.Active = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Active |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ m.Succeeded = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Succeeded |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType)
+ }
+ m.Failed = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Failed |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelector) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.MatchLabels == nil {
+ m.MatchLabels = make(map[string]string)
+ }
+ m.MatchLabels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{})
+ if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LabelSelectorRequirement) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operator = LabelSelectorOperator(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Values = append(m.Values, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
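+// Unmarshal decodes a ListOptions: the selector and ResourceVersion strings
+// are copied out of length-delimited ranges, Watch is a varint converted
+// with bool(v != 0), and the optional TimeoutSeconds is decoded into a
+// local int64 whose address is stored on the struct.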
+func (m *ListOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LabelSelector = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldSelector = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Watch = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
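+// Unmarshal decodes a NetworkPolicyIngressRule. Its repeated message fields
+// (Ports, From) are decoded by appending a zero value to the slice and then
+// unmarshaling the length-delimited payload into that last element.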
+func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, NetworkPolicyPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.From = append(m.From, NetworkPolicyPeer{})
+ if err := m.From[len(m.From)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, NetworkPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyPeer) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodSelector == nil {
+ m.PodSelector = &LabelSelector{}
+ }
+ if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
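+// Unmarshal decodes a NetworkPolicyPort. Both fields are optional pointers:
+// Protocol is set to the address of a freshly decoded string value, and
+// Port (an IntOrString) is allocated on first use before its payload is
+// unmarshaled into it.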
+func (m *NetworkPolicyPort) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := k8s_io_kubernetes_pkg_api_v1.Protocol(data[iNdEx:postIndex])
+ m.Protocol = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Port == nil {
+ m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{}
+ }
+ if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicySpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
+ if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodSecurityPolicy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodSecurityPolicyList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PodSecurityPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
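+// Unmarshal decodes a PodSecurityPolicySpec. Bool fields (Privileged,
+// HostNetwork, HostPID, HostIPC, ReadOnlyRootFilesystem) arrive as varints
+// and are converted with bool(v != 0); the repeated string-alias fields
+// (the Capability lists and Volumes) are appended straight from the
+// length-delimited byte ranges; SELinux, RunAsUser, SupplementalGroups and
+// FSGroup are nested messages unmarshaled in place.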
+func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Privileged = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Volumes = append(m.Volumes, FSType(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.HostNetwork = bool(v != 0)
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HostPorts = append(m.HostPorts, HostPortRange{})
+ if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.HostPID = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.HostIPC = bool(v != 0)
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SELinux.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RunAsUser.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SupplementalGroups.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.FSGroup.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadOnlyRootFilesystem = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSet) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ReplicaSet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReplicaSetSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Replicas = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
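+// Unmarshal decodes a ReplicaSetStatus. Its counters are not optional, so
+// each is reset to zero and accumulated in place by the varint shift loop,
+// unlike ReplicaSetSpec.Replicas above, which is decoded into a local and
+// stored through a pointer.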
+func (m *ReplicaSetStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType)
+ }
+ m.FullyLabeledReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
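+// Unmarshal decodes a ReplicationControllerDummy, which has no fields of
+// its own, so every tag falls through to the default skipGenerated path.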
+func (m *ReplicationControllerDummy) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollbackConfig) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingUpdateDeployment) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rule = RunAsUserStrategy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ranges = append(m.Ranges, IDRange{})
+ if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rule = SELinuxStrategy(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SELinuxOptions == nil {
+ m.SELinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{}
+ }
+ if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Scale) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Scale: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScaleSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScaleStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Replicas |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var keykey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ keykey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey := string(data[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ var valuekey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ valuekey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ if m.Selector == nil {
+ m.Selector = make(map[string]string)
+ }
+ m.Selector[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TargetSelector = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SubresourceReference) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SubresourceReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SubresourceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subresource = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rule = SupplementalGroupsStrategyType(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ranges = append(m.Ranges, IDRange{})
+ if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ThirdPartyResource) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, APIVersion{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ThirdPartyResourceData) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ThirdPartyResourceDataList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ThirdPartyResourceData{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ThirdPartyResourceList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ThirdPartyResource{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
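Every Unmarshal method in the generated file above follows the same pattern emitted by gogo/protobuf: read a base-128 varint, split it into a field number and a wire type (the key is fieldNum<<3 | wireType), decode the payload according to that wire type, and hand anything unknown to skipGenerated. A minimal, stand-alone sketch of that header step is shown below; decodeVarint and decodeTag are illustrative helpers written for this note, not functions from the generated file or from any protobuf library.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint, mirroring the inner
// "for shift := uint(0); ; shift += 7" loops in the generated code above.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
	return 0, 0, errors.New("integer overflow")
}

// decodeTag splits a decoded key into its field number and wire type,
// exactly as fieldNum := int32(wire >> 3); wireType := int(wire & 0x7) does above.
func decodeTag(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	// 0x08 0x03 encodes field 1, wire type 0 (varint), value 3 --
	// the same shape ScaleSpec.Unmarshal expects for its Replicas field.
	data := []byte{0x08, 0x03}
	key, n, _ := decodeVarint(data)
	fieldNum, wireType := decodeTag(key)
	value, _, _ := decodeVarint(data[n:])
	fmt.Printf("field=%d wireType=%d value=%d\n", fieldNum, wireType, value)
}

Running the sketch prints field=1 wireType=0 value=3, which is exactly how ScaleSpec.Unmarshal above recognizes a Replicas value of 3.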
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto
new file mode 100644
index 0000000..46df0d6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto
@@ -0,0 +1,1010 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.extensions.v1beta1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// An APIVersion represents a single concrete version of an object model.
+message APIVersion {
+ // Name of this version (e.g. 'v1').
+ optional string name = 1;
+}
+
+message CPUTargetUtilization {
+ // fraction of the requested CPU that should be utilized/used,
+ // e.g. 70 means that 70% of the requested CPU should be in use.
+ optional int32 targetPercentage = 1;
+}
+
+message CustomMetricCurrentStatus {
+ // Custom Metric name.
+ optional string name = 1;
+
+ // Custom Metric value (average).
+ optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2;
+}
+
+message CustomMetricCurrentStatusList {
+ repeated CustomMetricCurrentStatus items = 1;
+}
+
+// Alpha-level support for Custom Metrics in HPA (as annotations).
+message CustomMetricTarget {
+ // Custom Metric name.
+ optional string name = 1;
+
+ // Custom Metric value (average).
+ optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2;
+}
+
+message CustomMetricTargetList {
+ repeated CustomMetricTarget items = 1;
+}
+
+// DaemonSet represents the configuration of a daemon set.
+message DaemonSet {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired behavior of this daemon set.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional DaemonSetSpec spec = 2;
+
+ // Status is the current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional DaemonSetStatus status = 3;
+}
+
+// DaemonSetList is a collection of daemon sets.
+message DaemonSetList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of daemon sets.
+ repeated DaemonSet items = 2;
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+message DaemonSetSpec {
+ // Selector is a label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on Pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional LabelSelector selector = 1;
+
+ // Template is the object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2;
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+message DaemonSetStatus {
+ // CurrentNumberScheduled is the number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
+ optional int32 currentNumberScheduled = 1;
+
+ // NumberMisscheduled is the number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
+ optional int32 numberMisscheduled = 2;
+
+ // DesiredNumberScheduled is the total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
+ optional int32 desiredNumberScheduled = 3;
+}
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+message Deployment {
+ // Standard object metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the Deployment.
+ optional DeploymentSpec spec = 2;
+
+ // Most recently observed status of the Deployment.
+ optional DeploymentStatus status = 3;
+}
+
+// DeploymentList is a list of Deployments.
+message DeploymentList {
+ // Standard list metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of Deployments.
+ repeated Deployment items = 2;
+}
+
+// DeploymentRollback stores the information required to rollback a deployment.
+message DeploymentRollback {
+ // Required: This must match the Name of a deployment.
+ optional string name = 1;
+
+ // The annotations to be updated to a deployment
+ map<string, string> updatedAnnotations = 2;
+
+ // The config of this deployment rollback.
+ optional RollbackConfig rollbackTo = 3;
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+message DeploymentSpec {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ optional int32 replicas = 1;
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ optional LabelSelector selector = 2;
+
+ // Template describes the pods that will be created.
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ optional DeploymentStrategy strategy = 4;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ optional int32 minReadySeconds = 5;
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ optional int32 revisionHistoryLimit = 6;
+
+ // Indicates that the deployment is paused and will not be processed by the
+ // deployment controller.
+ optional bool paused = 7;
+
+ // The config this deployment is rolling back to. Will be cleared after rollback is done.
+ optional RollbackConfig rollbackTo = 8;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+ // The generation observed by the deployment controller.
+ optional int64 observedGeneration = 1;
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ optional int32 replicas = 2;
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ optional int32 updatedReplicas = 3;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ optional int32 availableReplicas = 4;
+
+ // Total number of unavailable pods targeted by this deployment.
+ optional int32 unavailableReplicas = 5;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ optional string type = 1;
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// ExportOptions is the query options to the standard REST get call.
+message ExportOptions {
+ // Should this value be exported. Export strips fields that a user can not specify.
+ optional bool export = 1;
+
+ // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
+ optional bool exact = 2;
+}
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+message FSGroupStrategyOptions {
+ // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+ optional string rule = 1;
+
+ // Ranges are the allowed ranges of fs groups. If you would like to force a single
+ // fs group then supply a single range with the same start and end.
+ repeated IDRange ranges = 2;
+}
+
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
+// the path are forwarded to the backend.
+message HTTPIngressPath {
+ // Path is an extended POSIX regex as defined by IEEE Std 1003.1,
+ // (i.e. this follows the egrep/unix syntax, not the perl syntax)
+ // matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path"
+ // part of a URL as defined by RFC 3986. Paths must begin with
+ // a '/'. If unspecified, the path defaults to a catch all sending
+ // traffic to the backend.
+ optional string path = 1;
+
+ // Backend defines the referenced service endpoint to which the traffic
+ // will be forwarded to.
+ optional IngressBackend backend = 2;
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend where
+// parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+message HTTPIngressRuleValue {
+ // A collection of paths that map requests to backends.
+ repeated HTTPIngressPath paths = 1;
+}
+
+// configuration of a horizontal pod autoscaler.
+message HorizontalPodAutoscaler {
+ // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ optional HorizontalPodAutoscalerSpec spec = 2;
+
+ // current information about the autoscaler.
+ optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+ // Standard list metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // list of horizontal pod autoscaler objects.
+ repeated HorizontalPodAutoscaler items = 2;
+}
+
+// specification of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerSpec {
+ // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status,
+ // and will set the desired number of pods by modifying its spec.
+ optional SubresourceReference scaleRef = 1;
+
+ // lower limit for the number of pods that can be set by the autoscaler, default 1.
+ optional int32 minReplicas = 2;
+
+ // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ optional int32 maxReplicas = 3;
+
+ // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+ // if not specified it defaults to the target CPU utilization at 80% of the requested resources.
+ optional CPUTargetUtilization cpuUtilization = 4;
+}
+
+// current status of a horizontal pod autoscaler
+message HorizontalPodAutoscalerStatus {
+ // most recent generation observed by this autoscaler.
+ optional int64 observedGeneration = 1;
+
+ // last time the HorizontalPodAutoscaler scaled the number of pods;
+ // used by the autoscaler to control how often the number of pods is changed.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2;
+
+ // current number of replicas of pods managed by this autoscaler.
+ optional int32 currentReplicas = 3;
+
+ // desired number of replicas of pods managed by this autoscaler.
+ optional int32 desiredReplicas = 4;
+
+ // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+ // e.g. 70 means that an average pod is using now 70% of its requested CPU.
+ optional int32 currentCPUUtilizationPercentage = 5;
+}
+
+// Host Port Range defines a range of host ports that will be enabled by a policy
+// for pods to use. It requires both the start and end to be defined.
+message HostPortRange {
+ // min is the start of the range, inclusive.
+ optional int32 min = 1;
+
+ // max is the end of the range, inclusive.
+ optional int32 max = 2;
+}
+
+// ID Range provides a min/max of an allowed range of IDs.
+message IDRange {
+ // Min is the start of the range, inclusive.
+ optional int64 min = 1;
+
+ // Max is the end of the range, inclusive.
+ optional int64 max = 2;
+}
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+message Ingress {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec is the desired state of the Ingress.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional IngressSpec spec = 2;
+
+ // Status is the current state of the Ingress.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional IngressStatus status = 3;
+}
+
+// IngressBackend describes all endpoints for a given service and port.
+message IngressBackend {
+ // Specifies the name of the referenced service.
+ optional string serviceName = 1;
+
+ // Specifies the port of the referenced service.
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString servicePort = 2;
+}
+
+// IngressList is a collection of Ingress.
+message IngressList {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of Ingress.
+ repeated Ingress items = 2;
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+message IngressRule {
+ // Host is the fully qualified domain name of a network host, as defined
+ // by RFC 3986. Note the following deviations from the "host" part of the
+ // URI as defined in the RFC:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
+ // IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the IngressRuleValue.
+ // If the host is unspecified, the Ingress routes all traffic based on the
+ // specified IngressRuleValue.
+ optional string host = 1;
+
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to an http catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
+ optional IngressRuleValue ingressRuleValue = 2;
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+message IngressRuleValue {
+ optional HTTPIngressRuleValue http = 1;
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+message IngressSpec {
+ // A default backend capable of servicing requests that don't match any
+ // rule. At least one of 'backend' or 'rules' must be specified. This field
+ // is optional to allow the loadbalancer controller or defaulting logic to
+ // specify a global default.
+ optional IngressBackend backend = 1;
+
+ // TLS configuration. Currently the Ingress only supports a single TLS
+ // port, 443. If multiple members of this list specify different hosts, they
+ // will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ repeated IngressTLS tls = 2;
+
+ // A list of host rules used to configure the Ingress. If unspecified, or
+ // no rule matches, all traffic is sent to the default backend.
+ repeated IngressRule rules = 3;
+}
+
+// IngressStatus describe the current state of the Ingress.
+message IngressStatus {
+ // LoadBalancer contains the current status of the load-balancer.
+ optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1;
+}
+
+// IngressTLS describes the transport layer security associated with an Ingress.
+message IngressTLS {
+ // Hosts are a list of hosts included in the TLS certificate. The values in
+ // this list must match the name/s used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ repeated string hosts = 1;
+
+ // SecretName is the name of the secret used to terminate SSL traffic on 443.
+ // Field is left optional to allow SSL routing based on SNI hostname alone.
+ // If the SNI host in a listener conflicts with the "Host" header field used
+ // by an IngressRule, the SNI host is used for termination and value of the
+ // Host header is used for routing.
+ optional string secretName = 2;
+}
+
+// Job represents the configuration of a single job.
+message Job {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobSpec spec = 2;
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional JobStatus status = 3;
+}
+
+// JobCondition describes current state of a job.
+message JobCondition {
+ // Type of job condition, Complete or Failed.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition was checked.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+ // Last time the condition transitioned from one status to another.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+ // (brief) reason for the condition's last transition.
+ optional string reason = 5;
+
+ // Human readable message indicating details about last transition.
+ optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of Job.
+ repeated Job items = 2;
+}
+
+// JobSpec describes how the job execution will look.
+message JobSpec {
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional int32 parallelism = 1;
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional int32 completions = 2;
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; value must be positive integer
+ optional int64 activeDeadlineSeconds = 3;
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional LabelSelector selector = 4;
+
+ // AutoSelector controls generation of pod labels and pod selectors.
+ // It was not present in the original extensions/v1beta1 Job definition, but exists
+ // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite
+ // meaning as, ManualSelector.
+ // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+ optional bool autoSelector = 5;
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+ // Conditions represent the latest available observations of an object's current state.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ repeated JobCondition conditions = 1;
+
+ // StartTime represents time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2;
+
+ // CompletionTime represents time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3;
+
+ // Active is the number of actively running pods.
+ optional int32 active = 4;
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ optional int32 succeeded = 5;
+
+ // Failed is the number of pods which reached Phase Failed.
+ optional int32 failed = 6;
+}
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ map<string, string> matchLabels = 1;
+
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+ // key is the label key that the selector applies to.
+ optional string key = 1;
+
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ optional string operator = 2;
+
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ repeated string values = 3;
+}
+
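The LabelSelector and LabelSelectorRequirement messages above state that each matchLabels entry behaves like an In requirement with a single value and that all requirements are ANDed. A rough, self-contained sketch of that semantics follows; it uses plain maps and a local requirement type rather than the generated Go structs, and the operator strings simply mirror the comment above.

package main

import "fmt"

// requirement loosely mirrors LabelSelectorRequirement, for illustration only.
type requirement struct {
	Key      string
	Operator string // "In", "NotIn", "Exists", "DoesNotExist"
	Values   []string
}

func contains(values []string, v string) bool {
	for _, x := range values {
		if x == v {
			return true
		}
	}
	return false
}

// matches reports whether the given labels satisfy matchLabels and exprs;
// every requirement must hold, i.e. the requirements are ANDed.
func matches(labels, matchLabels map[string]string, exprs []requirement) bool {
	// Each matchLabels entry is equivalent to an "In" requirement with one value.
	for k, v := range matchLabels {
		exprs = append(exprs, requirement{Key: k, Operator: "In", Values: []string{v}})
	}
	for _, r := range exprs {
		v, ok := labels[r.Key]
		switch r.Operator {
		case "In":
			if !ok || !contains(r.Values, v) {
				return false
			}
		case "NotIn":
			if ok && contains(r.Values, v) {
				return false
			}
		case "Exists":
			if !ok {
				return false
			}
		case "DoesNotExist":
			if ok {
				return false
			}
		}
	}
	return true
}

func main() {
	podLabels := map[string]string{"app": "nginx", "tier": "frontend"}
	fmt.Println(matches(podLabels,
		map[string]string{"app": "nginx"},
		[]requirement{{Key: "tier", Operator: "In", Values: []string{"frontend", "backend"}}}))
	// Prints true: both requirements hold for the pod's labels.
}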
+// ListOptions is the query options to a standard REST list call.
+message ListOptions {
+ // A selector to restrict the list of returned objects by their labels.
+ // Defaults to everything.
+ optional string labelSelector = 1;
+
+ // A selector to restrict the list of returned objects by their fields.
+ // Defaults to everything.
+ optional string fieldSelector = 2;
+
+ // Watch for changes to the described resources and return them as a stream of
+ // add, update, and remove notifications. Specify resourceVersion.
+ optional bool watch = 3;
+
+ // When specified with a watch call, shows changes that occur after that particular version of a resource.
+ // Defaults to changes from the beginning of history.
+ optional string resourceVersion = 4;
+
+ // Timeout for the list/watch call.
+ optional int64 timeoutSeconds = 5;
+}
+
+message NetworkPolicy {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior for this NetworkPolicy.
+ optional NetworkPolicySpec spec = 2;
+}
+
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
+message NetworkPolicyIngressRule {
+ // List of ports which should be made accessible on the pods selected for this rule.
+ // Each item in this list is combined using a logical OR.
+ // If this field is not provided, this rule matches all ports (traffic not restricted by port).
+ // If this field is empty, this rule matches no ports (no traffic matches).
+ // If this field is present and contains at least one item, then this rule allows traffic
+ // only if the traffic matches at least one port in the list.
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+ repeated NetworkPolicyPort ports = 1;
+
+ // List of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation.
+ // If this field is not provided, this rule matches all sources (traffic not restricted by source).
+ // If this field is empty, this rule matches no sources (no traffic matches).
+ // If this field is present and contains at least one item, this rule allows traffic only if the
+ // traffic matches at least one item in the from list.
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+ repeated NetworkPolicyPeer from = 2;
+}
+
+// Network Policy List is a list of NetworkPolicy objects.
+message NetworkPolicyList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of schema objects.
+ repeated NetworkPolicy items = 2;
+}
+
+message NetworkPolicyPeer {
+ // This is a label selector which selects Pods in this namespace.
+ // This field follows standard label selector semantics.
+ // If not provided, this selector selects no pods.
+ // If present but empty, this selector selects all pods in this namespace.
+ optional LabelSelector podSelector = 1;
+
+ // Selects Namespaces using cluster-scoped labels. This
+ // matches all pods in all namespaces selected by this label selector.
+ // This field follows standard label selector semantics.
+ // If omitted, this selector selects no namespaces.
+ // If present but empty, this selector selects all namespaces.
+ optional LabelSelector namespaceSelector = 2;
+}
+
+message NetworkPolicyPort {
+ // Optional. The protocol (TCP or UDP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ optional string protocol = 1;
+
+ // If specified, the port on the given protocol. This can
+ // either be a numerical or named port on a pod. If this field is not provided,
+ // this matches all port names and numbers.
+ // If present, only traffic on the specified protocol AND port
+ // will be matched.
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2;
+}
+
+message NetworkPolicySpec {
+ // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules
+ // is applied to any pods selected by this field. Multiple network policies can select the
+ // same set of pods. In this case, the ingress rules for each are combined additively.
+ // This field is NOT optional and follows standard label selector semantics.
+ // An empty podSelector matches all pods in this namespace.
+ optional LabelSelector podSelector = 1;
+
+ // List of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it,
+ // OR if the traffic source is the pod's local node,
+ // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+ // objects whose podSelector matches the pod.
+ // If this field is empty then this NetworkPolicy does not affect ingress isolation.
+ // If this field is present and contains at least one rule, this policy allows any traffic
+ // which matches at least one of the ingress rules in this list.
+ repeated NetworkPolicyIngressRule ingress = 2;
+}
+
+// Pod Security Policy governs the ability to make requests that affect the Security Context
+// that will be applied to a pod and container.
+message PodSecurityPolicy {
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // spec defines the policy enforced.
+ optional PodSecurityPolicySpec spec = 2;
+}
+
+// Pod Security Policy List is a list of PodSecurityPolicy objects.
+message PodSecurityPolicyList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of schema objects.
+ repeated PodSecurityPolicy items = 2;
+}
+
+// Pod Security Policy Spec defines the policy enforced.
+message PodSecurityPolicySpec {
+ // privileged determines if a pod can request to be run as privileged.
+ optional bool privileged = 1;
+
+ // DefaultAddCapabilities is the default set of capabilities that will be added to the container
+ // unless the pod spec specifically drops the capability. You may not list a capability in both
+ // DefaultAddCapabilities and RequiredDropCapabilities.
+ repeated string defaultAddCapabilities = 2;
+
+ // RequiredDropCapabilities are the capabilities that will be dropped from the container. These
+ // are required to be dropped and cannot be added.
+ repeated string requiredDropCapabilities = 3;
+
+ // AllowedCapabilities is a list of capabilities that can be requested to add to the container.
+ // Capabilities in this field may be added at the pod author's discretion.
+ // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
+ repeated string allowedCapabilities = 4;
+
+ // volumes is a white list of allowed volume plugins. Empty indicates that all plugins
+ // may be used.
+ repeated string volumes = 5;
+
+ // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+ optional bool hostNetwork = 6;
+
+ // hostPorts determines which host port ranges are allowed to be exposed.
+ repeated HostPortRange hostPorts = 7;
+
+ // hostPID determines if the policy allows the use of HostPID in the pod spec.
+ optional bool hostPID = 8;
+
+ // hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+ optional bool hostIPC = 9;
+
+ // seLinux is the strategy that will dictate the allowable labels that may be set.
+ optional SELinuxStrategyOptions seLinux = 10;
+
+ // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+ optional RunAsUserStrategyOptions runAsUser = 11;
+
+ // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+ optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
+
+ // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+ optional FSGroupStrategyOptions fsGroup = 13;
+
+ // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
+ // system. If the container specifically requests to run with a non-read only root file system
+ // the PSP should deny the pod.
+ // If set to false the container may run with a read only root file system if it wishes but it
+ // will not be forced to.
+ optional bool readOnlyRootFilesystem = 14;
+}
+
+// ReplicaSet represents the configuration of a ReplicaSet.
+message ReplicaSet {
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ReplicaSetSpec spec = 2;
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // List of ReplicaSets.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
+ repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ optional int32 replicas = 1;
+
+ // Selector is a label query over pods that should match the replica count.
+ // If the selector is empty, it is defaulted to the labels present on the pod template.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+ // Replicas is the most recently observed number of replicas.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ optional int32 fullyLabeledReplicas = 2;
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ optional int64 observedGeneration = 3;
+}
+
+// Dummy definition
+message ReplicationControllerDummy {
+}
+
+message RollbackConfig {
+ // The revision to roll back to. If set to 0, rolls back to the last revision.
+ optional int64 revision = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up.
+ // This can not be 0 if MaxSurge is 0.
+ // By default, a fixed value of 1 is used.
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old RC
+ // can be scaled down further, followed by scaling up the new RC, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // By default, a value of 1 is used.
+ // Example: when this is set to 30%, the new RC can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods does not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new RC can be scaled up further, ensuring that the total number of pods running
+ // at any time during the update is at most 130% of desired pods (see the worked sketch after this file).
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RunAsUser Strategy Options defines the strategy type and any options used to create the strategy.
+message RunAsUserStrategyOptions {
+ // Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+ optional string rule = 1;
+
+ // Ranges are the allowed ranges of uids that may be used.
+ repeated IDRange ranges = 2;
+}
+
+// SELinux Strategy Options defines the strategy type and any options used to create the strategy.
+message SELinuxStrategyOptions {
+ // type is the strategy that will dictate the allowable labels that may be set.
+ optional string rule = 1;
+
+ // seLinuxOptions required to run as; required for MustRunAs
+ // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context
+ optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2;
+}
+
+// represents a scaling request for a resource.
+message Scale {
+ // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ optional ScaleSpec spec = 2;
+
+ // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ optional ScaleStatus status = 3;
+}
+
+// describes the attributes of a scale subresource
+message ScaleSpec {
+ // desired number of instances for the scaled object.
+ optional int32 replicas = 1;
+}
+
+// represents the current status of a scale subresource.
+message ScaleStatus {
+ // actual number of observed instances of the scaled object.
+ optional int32 replicas = 1;
+
+ // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ map<string, string> selector = 2;
+
+ // label selector for pods that should match the replicas count. This is a serialized
+ // version of both map-based and more expressive set-based selectors. This is done to
+ // avoid introspection in the clients. The string will be in the same format as the
+ // query-param syntax. If the target type only supports map-based selectors, both this
+ // field and map-based selector field are populated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ optional string targetSelector = 3;
+}
+
+// SubresourceReference contains enough information to let you inspect or modify the referred subresource.
+message SubresourceReference {
+ // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ optional string kind = 1;
+
+ // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ optional string name = 2;
+
+ // API version of the referent
+ optional string apiVersion = 3;
+
+ // Subresource name of the referent
+ optional string subresource = 4;
+}
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+message SupplementalGroupsStrategyOptions {
+ // Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+ optional string rule = 1;
+
+ // Ranges are the allowed ranges of supplemental groups. If you would like to force a single
+ // supplemental group then supply a single range with the same start and end.
+ repeated IDRange ranges = 2;
+}
+
+// A ThirdPartyResource is a generic representation of a resource; it is used by add-ons and plugins to add new resource
+// types to the API. It consists of one or more Versions of the API.
+message ThirdPartyResource {
+ // Standard object metadata
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Description is the description of this object.
+ optional string description = 2;
+
+ // Versions are versions for this third party object
+ repeated APIVersion versions = 3;
+}
+
+// An internal object, used for versioned storage in etcd. Not exposed to the end user.
+message ThirdPartyResourceData {
+ // Standard object metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Data is the raw JSON data for this resource.
+ optional bytes data = 2;
+}
+
+// ThirdPartyResourceDataList is a list of ThirdPartyResourceData.
+message ThirdPartyResourceDataList {
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of ThirdPartyResourceData.
+ repeated ThirdPartyResourceData items = 2;
+}
+
+// ThirdPartyResourceList is a list of ThirdPartyResources.
+message ThirdPartyResourceList {
+ // Standard list metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is the list of ThirdPartyResources.
+ repeated ThirdPartyResource items = 2;
+}
+
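The RollingUpdateDeployment comments above describe how a percentage value for maxUnavailable or maxSurge is resolved against the desired replica count. The short, self-contained Go sketch below is not part of the vendored sources; it uses plain integer math instead of the vendored intstr helpers, all names are illustrative, and it simply walks through the 30% example from those comments.

package main

import (
	"fmt"
	"math"
)

// resolvePercent converts a percentage of the desired replica count into an
// absolute pod count, rounding up as the field comments above describe.
func resolvePercent(percent, desired int) int {
	return int(math.Ceil(float64(desired) * float64(percent) / 100.0))
}

func main() {
	desired := 10
	maxUnavailable := resolvePercent(30, desired) // 30% of 10, rounded up -> 3
	maxSurge := resolvePercent(30, desired)       // 30% of 10, rounded up -> 3

	// Bounds the comments describe: at least 70% of desired pods stay
	// available, and the total never exceeds 130% of desired pods.
	minAvailable := desired - maxUnavailable // 7
	maxTotal := desired + maxSurge           // 13

	fmt.Printf("minAvailable=%d maxTotal=%d\n", minAvailable, maxTotal)
}
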
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go
new file mode 100644
index 0000000..91c1c48
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "extensions"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Deployment{},
+ &DeploymentList{},
+ &DeploymentRollback{},
+ &HorizontalPodAutoscaler{},
+ &HorizontalPodAutoscalerList{},
+ &Job{},
+ &JobList{},
+ &ReplicationControllerDummy{},
+ &Scale{},
+ &ThirdPartyResource{},
+ &ThirdPartyResourceList{},
+ &DaemonSetList{},
+ &DaemonSet{},
+ &ThirdPartyResourceData{},
+ &ThirdPartyResourceDataList{},
+ &Ingress{},
+ &IngressList{},
+ &ListOptions{},
+ &v1.DeleteOptions{},
+ &ReplicaSet{},
+ &ReplicaSetList{},
+ &PodSecurityPolicy{},
+ &PodSecurityPolicyList{},
+ &NetworkPolicy{},
+ &NetworkPolicyList{},
+ )
+ // Add the watch version that applies
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
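The sketch below is not part of the vendored sources; the package name and import alias are illustrative, and it assumes the vendored import paths in this tree resolve as written. It shows how a consumer such as kube2msb would typically use the AddToScheme function defined in register.go above: build a runtime.Scheme, register the extensions/v1beta1 types on it, and then encode or decode those types through the scheme.

package main

import (
	"fmt"

	extv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Create an empty scheme and register the known types, defaulting funcs
	// and conversion funcs exactly as register.go defines them.
	scheme := runtime.NewScheme()
	extv1beta1.AddToScheme(scheme)

	// The group/version the types were registered under ("extensions/v1beta1").
	fmt.Println("registered under", extv1beta1.SchemeGroupVersion.String())
}
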
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go
new file mode 100644
index 0000000..a0220de
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go
@@ -0,0 +1,23939 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1beta1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
+ pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg3_types "k8s.io/kubernetes/pkg/types"
+ pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg4_resource.Quantity
+ var v1 pkg1_unversioned.TypeMeta
+ var v2 pkg2_v1.ObjectMeta
+ var v3 pkg3_types.UID
+ var v4 pkg5_intstr.IntOrString
+ var v5 time.Time
+ _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
+ }
+}
+
+func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.Selector) != 0
+ yyq2[2] = x.TargetSelector != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.Selector, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv5 := &x.Selector
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv5, false, d)
+ }
+ }
+ case "targetSelector":
+ if r.TryDecodeAsNil() {
+ x.TargetSelector = ""
+ } else {
+ x.TargetSelector = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Selector = nil
+ } else {
+ yyv10 := &x.Selector
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv10, false, d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetSelector = ""
+ } else {
+ x.TargetSelector = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ScaleSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ScaleStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Kind != ""
+ yyq2[1] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Kind != ""
+ yyq2[1] = x.Name != ""
+ yyq2[2] = x.APIVersion != ""
+ yyq2[3] = x.Subresource != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subresource))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subresource"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Subresource))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SubresourceReference) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "subresource":
+ if r.TryDecodeAsNil() {
+ x.Subresource = ""
+ } else {
+ x.Subresource = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SubresourceReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subresource = ""
+ } else {
+ x.Subresource = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TargetPercentage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("targetPercentage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TargetPercentage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CPUTargetUtilization) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "targetPercentage":
+ if r.TryDecodeAsNil() {
+ x.TargetPercentage = 0
+ } else {
+ x.TargetPercentage = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CPUTargetUtilization) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetPercentage = 0
+ } else {
+ x.TargetPercentage = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.TargetValue
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.TargetValue
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.TargetValue = pkg4_resource.Quantity{}
+ } else {
+ yyv5 := &x.TargetValue
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TargetValue = pkg4_resource.Quantity{}
+ } else {
+ yyv9 := &x.TargetValue
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv7 := &x.Items
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.CurrentValue
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.CurrentValue
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.CurrentValue = pkg4_resource.Quantity{}
+ } else {
+ yyv5 := &x.CurrentValue
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentValue = pkg4_resource.Quantity{}
+ } else {
+ yyv9 := &x.CurrentValue
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv4 := &x.Items
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv7 := &x.Items
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.MinReplicas != nil
+ yyq2[3] = x.CPUUtilization != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.ScaleRef
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("scaleRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ScaleRef
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MinReplicas == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.MinReplicas
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MinReplicas == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.MinReplicas
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.CPUUtilization == nil {
+ r.EncodeNil()
+ } else {
+ x.CPUUtilization.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("cpuUtilization"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CPUUtilization == nil {
+ r.EncodeNil()
+ } else {
+ x.CPUUtilization.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "scaleRef":
+ if r.TryDecodeAsNil() {
+ x.ScaleRef = SubresourceReference{}
+ } else {
+ yyv4 := &x.ScaleRef
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "minReplicas":
+ if r.TryDecodeAsNil() {
+ if x.MinReplicas != nil {
+ x.MinReplicas = nil
+ }
+ } else {
+ if x.MinReplicas == nil {
+ x.MinReplicas = new(int32)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "maxReplicas":
+ if r.TryDecodeAsNil() {
+ x.MaxReplicas = 0
+ } else {
+ x.MaxReplicas = int32(r.DecodeInt(32))
+ }
+ case "cpuUtilization":
+ if r.TryDecodeAsNil() {
+ if x.CPUUtilization != nil {
+ x.CPUUtilization = nil
+ }
+ } else {
+ if x.CPUUtilization == nil {
+ x.CPUUtilization = new(CPUTargetUtilization)
+ }
+ x.CPUUtilization.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ScaleRef = SubresourceReference{}
+ } else {
+ yyv10 := &x.ScaleRef
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.MinReplicas != nil {
+ x.MinReplicas = nil
+ }
+ } else {
+ if x.MinReplicas == nil {
+ x.MinReplicas = new(int32)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MaxReplicas = 0
+ } else {
+ x.MaxReplicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CPUUtilization != nil {
+ x.CPUUtilization = nil
+ }
+ } else {
+ if x.CPUUtilization == nil {
+ x.CPUUtilization = new(CPUTargetUtilization)
+ }
+ x.CPUUtilization.CodecDecodeSelf(d)
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != nil
+ yyq2[1] = x.LastScaleTime != nil
+ yyq2[4] = x.CurrentCPUUtilizationPercentage != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.ObservedGeneration
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ObservedGeneration == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.ObservedGeneration
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.LastScaleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
+ } else if yym9 {
+ z.EncBinaryMarshal(x.LastScaleTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScaleTime)
+ } else {
+ z.EncFallback(x.LastScaleTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.LastScaleTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.LastScaleTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.LastScaleTime)
+ } else {
+ z.EncFallback(x.LastScaleTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredReplicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredReplicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.CurrentCPUUtilizationPercentage
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CurrentCPUUtilizationPercentage == nil {
+ r.EncodeNil()
+ } else {
+ yy20 := *x.CurrentCPUUtilizationPercentage
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeInt(int64(yy20))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "lastScaleTime":
+ if r.TryDecodeAsNil() {
+ if x.LastScaleTime != nil {
+ x.LastScaleTime = nil
+ }
+ } else {
+ if x.LastScaleTime == nil {
+ x.LastScaleTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.LastScaleTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScaleTime)
+ } else {
+ z.DecFallback(x.LastScaleTime, false)
+ }
+ }
+ case "currentReplicas":
+ if r.TryDecodeAsNil() {
+ x.CurrentReplicas = 0
+ } else {
+ x.CurrentReplicas = int32(r.DecodeInt(32))
+ }
+ case "desiredReplicas":
+ if r.TryDecodeAsNil() {
+ x.DesiredReplicas = 0
+ } else {
+ x.DesiredReplicas = int32(r.DecodeInt(32))
+ }
+ case "currentCPUUtilizationPercentage":
+ if r.TryDecodeAsNil() {
+ if x.CurrentCPUUtilizationPercentage != nil {
+ x.CurrentCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ x.CurrentCPUUtilizationPercentage = new(int32)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ObservedGeneration != nil {
+ x.ObservedGeneration = nil
+ }
+ } else {
+ if x.ObservedGeneration == nil {
+ x.ObservedGeneration = new(int64)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.LastScaleTime != nil {
+ x.LastScaleTime = nil
+ }
+ } else {
+ if x.LastScaleTime == nil {
+ x.LastScaleTime = new(pkg1_unversioned.Time)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(x.LastScaleTime)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.LastScaleTime)
+ } else {
+ z.DecFallback(x.LastScaleTime, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentReplicas = 0
+ } else {
+ x.CurrentReplicas = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredReplicas = 0
+ } else {
+ x.DesiredReplicas = int32(r.DecodeInt(32))
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CurrentCPUUtilizationPercentage != nil {
+ x.CurrentCPUUtilizationPercentage = nil
+ }
+ } else {
+ if x.CurrentCPUUtilizationPercentage == nil {
+ x.CurrentCPUUtilizationPercentage = new(int32)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32))
+ }
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = HorizontalPodAutoscalerSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = HorizontalPodAutoscalerStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = HorizontalPodAutoscalerSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = HorizontalPodAutoscalerStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = x.Description != ""
+ yyq2[2] = len(x.Versions) != 0
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Description))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("description"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Description))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Versions == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.encSliceAPIVersion(([]APIVersion)(x.Versions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("versions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Versions == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSliceAPIVersion(([]APIVersion)(x.Versions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "description":
+ if r.TryDecodeAsNil() {
+ x.Description = ""
+ } else {
+ x.Description = string(r.DecodeString())
+ }
+ case "versions":
+ if r.TryDecodeAsNil() {
+ x.Versions = nil
+ } else {
+ yyv6 := &x.Versions
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceAPIVersion((*[]APIVersion)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Description = ""
+ } else {
+ x.Description = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Versions = nil
+ } else {
+ yyv13 := &x.Versions
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceAPIVersion((*[]APIVersion)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Name != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = len(x.Data) != 0
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv5 := &x.Data
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv11 := &x.Data
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = DeploymentSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = DeploymentStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = DeploymentSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = DeploymentStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [8]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != nil
+ yyq2[1] = x.Selector != nil
+ yyq2[3] = true
+ yyq2[4] = x.MinReadySeconds != 0
+ yyq2[5] = x.RevisionHistoryLimit != nil
+ yyq2[6] = x.Paused != false
+ yyq2[7] = x.RollbackTo != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Replicas
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Replicas
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy12 := &x.Template
+ yy12.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.Template
+ yy14.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy17 := &x.Strategy
+ yy17.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("strategy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy19 := &x.Strategy
+ yy19.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinReadySeconds))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MinReadySeconds))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.RevisionHistoryLimit == nil {
+ r.EncodeNil()
+ } else {
+ yy25 := *x.RevisionHistoryLimit
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeInt(int64(yy25))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RevisionHistoryLimit == nil {
+ r.EncodeNil()
+ } else {
+ yy27 := *x.RevisionHistoryLimit
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeInt(int64(yy27))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Paused))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("paused"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Paused))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ if x.RollbackTo == nil {
+ r.EncodeNil()
+ } else {
+ x.RollbackTo.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rollbackTo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RollbackTo == nil {
+ r.EncodeNil()
+ } else {
+ x.RollbackTo.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv7 := &x.Template
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "strategy":
+ if r.TryDecodeAsNil() {
+ x.Strategy = DeploymentStrategy{}
+ } else {
+ yyv8 := &x.Strategy
+ yyv8.CodecDecodeSelf(d)
+ }
+ case "minReadySeconds":
+ if r.TryDecodeAsNil() {
+ x.MinReadySeconds = 0
+ } else {
+ x.MinReadySeconds = int32(r.DecodeInt(32))
+ }
+ case "revisionHistoryLimit":
+ if r.TryDecodeAsNil() {
+ if x.RevisionHistoryLimit != nil {
+ x.RevisionHistoryLimit = nil
+ }
+ } else {
+ if x.RevisionHistoryLimit == nil {
+ x.RevisionHistoryLimit = new(int32)
+ }
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "paused":
+ if r.TryDecodeAsNil() {
+ x.Paused = false
+ } else {
+ x.Paused = bool(r.DecodeBool())
+ }
+ case "rollbackTo":
+ if r.TryDecodeAsNil() {
+ if x.RollbackTo != nil {
+ x.RollbackTo = nil
+ }
+ } else {
+ if x.RollbackTo == nil {
+ x.RollbackTo = new(RollbackConfig)
+ }
+ x.RollbackTo.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv18 := &x.Template
+ yyv18.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Strategy = DeploymentStrategy{}
+ } else {
+ yyv19 := &x.Strategy
+ yyv19.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinReadySeconds = 0
+ } else {
+ x.MinReadySeconds = int32(r.DecodeInt(32))
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RevisionHistoryLimit != nil {
+ x.RevisionHistoryLimit = nil
+ }
+ } else {
+ if x.RevisionHistoryLimit == nil {
+ x.RevisionHistoryLimit = new(int32)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Paused = false
+ } else {
+ x.Paused = bool(r.DecodeBool())
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RollbackTo != nil {
+ x.RollbackTo = nil
+ }
+ } else {
+ if x.RollbackTo == nil {
+ x.RollbackTo = new(RollbackConfig)
+ }
+ x.RollbackTo.CodecDecodeSelf(d)
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.UpdatedAnnotations) != 0
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.UpdatedAnnotations == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.UpdatedAnnotations == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy10 := &x.RollbackTo
+ yy10.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rollbackTo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.RollbackTo
+ yy12.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "updatedAnnotations":
+ if r.TryDecodeAsNil() {
+ x.UpdatedAnnotations = nil
+ } else {
+ yyv5 := &x.UpdatedAnnotations
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv5, false, d)
+ }
+ }
+ case "rollbackTo":
+ if r.TryDecodeAsNil() {
+ x.RollbackTo = RollbackConfig{}
+ } else {
+ yyv7 := &x.RollbackTo
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UpdatedAnnotations = nil
+ } else {
+ yyv12 := &x.UpdatedAnnotations
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv12, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RollbackTo = RollbackConfig{}
+ } else {
+ yyv14 := &x.RollbackTo
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Revision != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Revision))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("revision"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Revision))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "revision":
+ if r.TryDecodeAsNil() {
+ x.Revision = 0
+ } else {
+ x.Revision = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Revision = 0
+ } else {
+ x.Revision = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Type != ""
+ yyq2[1] = x.RollingUpdate != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.RollingUpdate == nil {
+ r.EncodeNil()
+ } else {
+ x.RollingUpdate.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RollingUpdate == nil {
+ r.EncodeNil()
+ } else {
+ x.RollingUpdate.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = DeploymentStrategyType(r.DecodeString())
+ }
+ case "rollingUpdate":
+ if r.TryDecodeAsNil() {
+ if x.RollingUpdate != nil {
+ x.RollingUpdate = nil
+ }
+ } else {
+ if x.RollingUpdate == nil {
+ x.RollingUpdate = new(RollingUpdateDeployment)
+ }
+ x.RollingUpdate.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = DeploymentStrategyType(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.RollingUpdate != nil {
+ x.RollingUpdate = nil
+ }
+ } else {
+ if x.RollingUpdate == nil {
+ x.RollingUpdate = new(RollingUpdateDeployment)
+ }
+ x.RollingUpdate.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.MaxUnavailable != nil
+ yyq2[1] = x.MaxSurge != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.MaxUnavailable == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) {
+ } else if !yym4 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.MaxUnavailable)
+ } else {
+ z.EncFallback(x.MaxUnavailable)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MaxUnavailable == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.MaxUnavailable)
+ } else {
+ z.EncFallback(x.MaxUnavailable)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MaxSurge == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.MaxSurge) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.MaxSurge)
+ } else {
+ z.EncFallback(x.MaxSurge)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("maxSurge"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MaxSurge == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.MaxSurge) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.MaxSurge)
+ } else {
+ z.EncFallback(x.MaxSurge)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "maxUnavailable":
+ if r.TryDecodeAsNil() {
+ if x.MaxUnavailable != nil {
+ x.MaxUnavailable = nil
+ }
+ } else {
+ if x.MaxUnavailable == nil {
+ x.MaxUnavailable = new(pkg5_intstr.IntOrString)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.MaxUnavailable)
+ } else {
+ z.DecFallback(x.MaxUnavailable, false)
+ }
+ }
+ case "maxSurge":
+ if r.TryDecodeAsNil() {
+ if x.MaxSurge != nil {
+ x.MaxSurge = nil
+ }
+ } else {
+ if x.MaxSurge == nil {
+ x.MaxSurge = new(pkg5_intstr.IntOrString)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.MaxSurge) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.MaxSurge)
+ } else {
+ z.DecFallback(x.MaxSurge, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.MaxUnavailable != nil {
+ x.MaxUnavailable = nil
+ }
+ } else {
+ if x.MaxUnavailable == nil {
+ x.MaxUnavailable = new(pkg5_intstr.IntOrString)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.MaxUnavailable)
+ } else {
+ z.DecFallback(x.MaxUnavailable, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.MaxSurge != nil {
+ x.MaxSurge = nil
+ }
+ } else {
+ if x.MaxSurge == nil {
+ x.MaxSurge = new(pkg5_intstr.IntOrString)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.MaxSurge) {
+ } else if !yym12 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.MaxSurge)
+ } else {
+ z.DecFallback(x.MaxSurge, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
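+// DeploymentStatus is all plain integer counters; the map encoding below
+// omits any counter that is still zero, while the array encoding always
+// writes all five slots.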
+func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.ObservedGeneration != 0
+ yyq2[1] = x.Replicas != 0
+ yyq2[2] = x.UpdatedReplicas != 0
+ yyq2[3] = x.AvailableReplicas != 0
+ yyq2[4] = x.UnavailableReplicas != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UpdatedReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UpdatedReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.AvailableReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("availableReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.AvailableReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UnavailableReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UnavailableReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "updatedReplicas":
+ if r.TryDecodeAsNil() {
+ x.UpdatedReplicas = 0
+ } else {
+ x.UpdatedReplicas = int32(r.DecodeInt(32))
+ }
+ case "availableReplicas":
+ if r.TryDecodeAsNil() {
+ x.AvailableReplicas = 0
+ } else {
+ x.AvailableReplicas = int32(r.DecodeInt(32))
+ }
+ case "unavailableReplicas":
+ if r.TryDecodeAsNil() {
+ x.UnavailableReplicas = 0
+ } else {
+ x.UnavailableReplicas = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UpdatedReplicas = 0
+ } else {
+ x.UpdatedReplicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AvailableReplicas = 0
+ } else {
+ x.AvailableReplicas = int32(r.DecodeInt(32))
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.UnavailableReplicas = 0
+ } else {
+ x.UnavailableReplicas = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
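+// DeploymentList follows the generated list pattern: ListMeta under
+// "metadata", the Items slice handled by the encSliceDeployment /
+// decSliceDeployment helpers, and the "kind"/"apiVersion" strings emitted
+// only when non-empty.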
+func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceDeployment(([]Deployment)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceDeployment(([]Deployment)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceDeployment((*[]Deployment)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceDeployment((*[]Deployment)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
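+// DaemonSetSpec has an optional label selector and a required pod template:
+// the selector is skipped in map form when nil, the template is always
+// written.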
+func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Selector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Template
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Template
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv5 := &x.Template
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv8 := &x.Template
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
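+// DaemonSetStatus always emits all three scheduling counters; none of them
+// are treated as optional by the generator.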
+func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentNumberScheduled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentNumberScheduled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NumberMisscheduled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.NumberMisscheduled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredNumberScheduled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredNumberScheduled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "currentNumberScheduled":
+ if r.TryDecodeAsNil() {
+ x.CurrentNumberScheduled = 0
+ } else {
+ x.CurrentNumberScheduled = int32(r.DecodeInt(32))
+ }
+ case "numberMisscheduled":
+ if r.TryDecodeAsNil() {
+ x.NumberMisscheduled = 0
+ } else {
+ x.NumberMisscheduled = int32(r.DecodeInt(32))
+ }
+ case "desiredNumberScheduled":
+ if r.TryDecodeAsNil() {
+ x.DesiredNumberScheduled = 0
+ } else {
+ x.DesiredNumberScheduled = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentNumberScheduled = 0
+ } else {
+ x.CurrentNumberScheduled = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NumberMisscheduled = 0
+ } else {
+ x.NumberMisscheduled = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredNumberScheduled = 0
+ } else {
+ x.DesiredNumberScheduled = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
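+// DaemonSet (and Job further below) use the generated object pattern:
+// metadata, spec and status are delegated to their own CodecEncodeSelf /
+// CodecDecodeSelf methods, with kind and apiVersion emitted only when
+// non-empty.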
+func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = DaemonSetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = DaemonSetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = DaemonSetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = DaemonSetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
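+// DaemonSetList and ThirdPartyResourceDataList below repeat the same list
+// pattern as DeploymentList, differing only in their element slice helpers
+// (encSliceDaemonSet / encSliceThirdPartyResourceData and the matching
+// decoders).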
+func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceDaemonSet(([]DaemonSet)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceDaemonSet(([]DaemonSet)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceDaemonSet((*[]DaemonSet)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceDaemonSet((*[]DaemonSet)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
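+// CodecDecodeSelf dispatches on the incoming container type: a map decodes by
+// field name, an array decodes positionally, and any other value type panics.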
+func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
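+// codecDecodeSelfFromMap fills Job from a map stream, matching the keys
+// "metadata", "spec", "status", "kind" and "apiVersion" and ignoring any
+// unrecognized keys via DecStructFieldNotFound.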
+func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
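+// codecDecodeSelfFromArray fills Job from an array stream in declaration
+// order and discards any extra trailing elements.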
+func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = JobSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = JobStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
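+// CodecEncodeSelf emits JobList with its list metadata, the "items" slice
+// (always written), and "kind"/"apiVersion" only when non-empty in map form.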
+func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceJob(([]Job)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceJob((*[]Job)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
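+// CodecEncodeSelf emits JobSpec; the pointer fields (parallelism, completions,
+// activeDeadlineSeconds, selector, autoSelector) are written only when
+// non-nil, while the pod template is always written.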
+func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Parallelism != nil
+ yyq2[1] = x.Completions != nil
+ yyq2[2] = x.ActiveDeadlineSeconds != nil
+ yyq2[3] = x.Selector != nil
+ yyq2[4] = x.AutoSelector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Parallelism
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("parallelism"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Parallelism == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Parallelism
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Completions
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(yy9))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Completions == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.Completions
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeInt(int64(yy11))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy14 := *x.ActiveDeadlineSeconds
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeInt(int64(yy14))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ActiveDeadlineSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.ActiveDeadlineSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.AutoSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy22 := *x.AutoSelector
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(yy22))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("autoSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AutoSelector == nil {
+ r.EncodeNil()
+ } else {
+ yy24 := *x.AutoSelector
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(yy24))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy27 := &x.Template
+ yy27.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy29 := &x.Template
+ yy29.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "parallelism":
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "completions":
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "activeDeadlineSeconds":
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ case "autoSelector":
+ if r.TryDecodeAsNil() {
+ if x.AutoSelector != nil {
+ x.AutoSelector = nil
+ }
+ } else {
+ if x.AutoSelector == nil {
+ x.AutoSelector = new(bool)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*bool)(x.AutoSelector)) = r.DecodeBool()
+ }
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv13 := &x.Template
+ yyv13.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj14 int
+ var yyb14 bool
+ var yyhl14 bool = l >= 0
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Parallelism != nil {
+ x.Parallelism = nil
+ }
+ } else {
+ if x.Parallelism == nil {
+ x.Parallelism = new(int32)
+ }
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Completions != nil {
+ x.Completions = nil
+ }
+ } else {
+ if x.Completions == nil {
+ x.Completions = new(int32)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*int32)(x.Completions)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.ActiveDeadlineSeconds != nil {
+ x.ActiveDeadlineSeconds = nil
+ }
+ } else {
+ if x.ActiveDeadlineSeconds == nil {
+ x.ActiveDeadlineSeconds = new(int64)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.AutoSelector != nil {
+ x.AutoSelector = nil
+ }
+ } else {
+ if x.AutoSelector == nil {
+ x.AutoSelector = new(bool)
+ }
+ yym23 := z.DecBinary()
+ _ = yym23
+ if false {
+ } else {
+ *((*bool)(x.AutoSelector)) = r.DecodeBool()
+ }
+ }
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv24 := &x.Template
+ yyv24.CodecDecodeSelf(d)
+ }
+ for {
+ yyj14++
+ if yyhl14 {
+ yyb14 = yyj14 > l
+ } else {
+ yyb14 = r.CheckBreak()
+ }
+ if yyb14 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj14-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
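+// CodecEncodeSelf emits JobStatus; in map form, conditions, startTime,
+// completionTime and the active/succeeded/failed counters are written only
+// when non-empty, non-nil or non-zero respectively.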
+func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Conditions) != 0
+ yyq2[1] = x.StartTime != nil
+ yyq2[2] = x.CompletionTime != nil
+ yyq2[3] = x.Active != 0
+ yyq2[4] = x.Succeeded != 0
+ yyq2[5] = x.Failed != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("conditions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Conditions == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceJobCondition(([]JobCondition)(x.Conditions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym7 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("startTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.StartTime == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.StartTime) {
+ } else if yym8 {
+ z.EncBinaryMarshal(x.StartTime)
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.StartTime)
+ } else {
+ z.EncFallback(x.StartTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym10 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("completionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.CompletionTime == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.CompletionTime) {
+ } else if yym11 {
+ z.EncBinaryMarshal(x.CompletionTime)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.CompletionTime)
+ } else {
+ z.EncFallback(x.CompletionTime)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("active"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Active))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("succeeded"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Succeeded))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("failed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Failed))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "conditions":
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv4 := &x.Conditions
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv4), d)
+ }
+ }
+ case "startTime":
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ case "completionTime":
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ case "active":
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ case "succeeded":
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ case "failed":
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj13 int
+ var yyb13 bool
+ var yyhl13 bool = l >= 0
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Conditions = nil
+ } else {
+ yyv14 := &x.Conditions
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decSliceJobCondition((*[]JobCondition)(yyv14), d)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.StartTime != nil {
+ x.StartTime = nil
+ }
+ } else {
+ if x.StartTime == nil {
+ x.StartTime = new(pkg1_unversioned.Time)
+ }
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.StartTime) {
+ } else if yym17 {
+ z.DecBinaryUnmarshal(x.StartTime)
+ } else if !yym17 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.StartTime)
+ } else {
+ z.DecFallback(x.StartTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.CompletionTime != nil {
+ x.CompletionTime = nil
+ }
+ } else {
+ if x.CompletionTime == nil {
+ x.CompletionTime = new(pkg1_unversioned.Time)
+ }
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.CompletionTime) {
+ } else if yym19 {
+ z.DecBinaryUnmarshal(x.CompletionTime)
+ } else if !yym19 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.CompletionTime)
+ } else {
+ z.DecFallback(x.CompletionTime, false)
+ }
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Active = 0
+ } else {
+ x.Active = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Succeeded = 0
+ } else {
+ x.Succeeded = int32(r.DecodeInt(32))
+ }
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Failed = 0
+ } else {
+ x.Failed = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj13++
+ if yyhl13 {
+ yyb13 = yyj13 > l
+ } else {
+ yyb13 = r.CheckBreak()
+ }
+ if yyb13 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj13-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
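+// JobConditionType encodes and decodes as a plain string unless a codec
+// extension is registered for it.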
+func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
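+// CodecEncodeSelf emits JobCondition; "type" and "status" are always written,
+// the probe and transition timestamps go through the extension/binary/JSON
+// marshal fallbacks, and "reason"/"message" appear only when non-empty.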
+func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = true
+ yyq2[3] = true
+ yyq2[4] = x.Reason != ""
+ yyq2[5] = x.Message != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Type.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Type.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yysf7 := &x.Status
+ yysf7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yysf8 := &x.Status
+ yysf8.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy10 := &x.LastProbeTime
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy10) {
+ } else if yym11 {
+ z.EncBinaryMarshal(yy10)
+ } else if !yym11 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy10)
+ } else {
+ z.EncFallback(yy10)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy12 := &x.LastProbeTime
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy12) {
+ } else if yym13 {
+ z.EncBinaryMarshal(yy12)
+ } else if !yym13 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy12)
+ } else {
+ z.EncFallback(yy12)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yy15 := &x.LastTransitionTime
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy15) {
+ } else if yym16 {
+ z.EncBinaryMarshal(yy15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy15)
+ } else {
+ z.EncFallback(yy15)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy17 := &x.LastTransitionTime
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy17) {
+ } else if yym18 {
+ z.EncBinaryMarshal(yy17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy17)
+ } else {
+ z.EncFallback(yy17)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("reason"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Reason))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("message"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Message))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "type":
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_v1.ConditionStatus(r.DecodeString())
+ }
+ case "lastProbeTime":
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv6 := &x.LastProbeTime
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if yym7 {
+ z.DecBinaryUnmarshal(yyv6)
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "lastTransitionTime":
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv8 := &x.LastTransitionTime
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv8) {
+ } else if yym9 {
+ z.DecBinaryUnmarshal(yyv8)
+ } else if !yym9 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv8)
+ } else {
+ z.DecFallback(yyv8, false)
+ }
+ }
+ case "reason":
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ case "message":
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Type = ""
+ } else {
+ x.Type = JobConditionType(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ""
+ } else {
+ x.Status = pkg2_v1.ConditionStatus(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastProbeTime = pkg1_unversioned.Time{}
+ } else {
+ yyv15 := &x.LastProbeTime
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv15) {
+ } else if yym16 {
+ z.DecBinaryUnmarshal(yyv15)
+ } else if !yym16 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv15)
+ } else {
+ z.DecFallback(yyv15, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LastTransitionTime = pkg1_unversioned.Time{}
+ } else {
+ yyv17 := &x.LastTransitionTime
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv17) {
+ } else if yym18 {
+ z.DecBinaryUnmarshal(yyv17)
+ } else if !yym18 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv17)
+ } else {
+ z.DecFallback(yyv17, false)
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Reason = ""
+ } else {
+ x.Reason = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Message = ""
+ } else {
+ x.Message = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
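+// CodecEncodeSelf emits Ingress with the same shape as Job above: metadata,
+// spec and status always, "kind" and "apiVersion" only when non-empty.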
+func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = IngressSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = IngressStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = IngressSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = IngressStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
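+// CodecEncodeSelf emits IngressList: list metadata, the "items" slice, and
+// "kind"/"apiVersion" only when non-empty in map form.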
+func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceIngress(([]Ingress)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceIngress(([]Ingress)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceIngress((*[]Ingress)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceIngress((*[]Ingress)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Backend != nil
+ yyq2[1] = len(x.TLS) != 0
+ yyq2[2] = len(x.Rules) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Backend == nil {
+ r.EncodeNil()
+ } else {
+ x.Backend.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("backend"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Backend == nil {
+ r.EncodeNil()
+ } else {
+ x.Backend.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.TLS == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("tls"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TLS == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceIngressRule(([]IngressRule)(x.Rules), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rules"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSliceIngressRule(([]IngressRule)(x.Rules), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "backend":
+ if r.TryDecodeAsNil() {
+ if x.Backend != nil {
+ x.Backend = nil
+ }
+ } else {
+ if x.Backend == nil {
+ x.Backend = new(IngressBackend)
+ }
+ x.Backend.CodecDecodeSelf(d)
+ }
+ case "tls":
+ if r.TryDecodeAsNil() {
+ x.TLS = nil
+ } else {
+ yyv5 := &x.TLS
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d)
+ }
+ }
+ case "rules":
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv7 := &x.Rules
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceIngressRule((*[]IngressRule)(yyv7), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Backend != nil {
+ x.Backend = nil
+ }
+ } else {
+ if x.Backend == nil {
+ x.Backend = new(IngressBackend)
+ }
+ x.Backend.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.TLS = nil
+ } else {
+ yyv11 := &x.TLS
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv13 := &x.Rules
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceIngressRule((*[]IngressRule)(yyv13), d)
+ }
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Hosts) != 0
+ yyq2[1] = x.SecretName != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Hosts == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Hosts, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hosts"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Hosts == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Hosts, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("secretName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "hosts":
+ if r.TryDecodeAsNil() {
+ x.Hosts = nil
+ } else {
+ yyv4 := &x.Hosts
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "secretName":
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Hosts = nil
+ } else {
+ yyv8 := &x.Hosts
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SecretName = ""
+ } else {
+ x.SecretName = string(r.DecodeString())
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.LoadBalancer
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("loadBalancer"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.LoadBalancer
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "loadBalancer":
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = pkg2_v1.LoadBalancerStatus{}
+ } else {
+ yyv4 := &x.LoadBalancer
+ yyv4.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LoadBalancer = pkg2_v1.LoadBalancerStatus{}
+ } else {
+ yyv6 := &x.LoadBalancer
+ yyv6.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Host != ""
+ yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("host"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Host))
+ }
+ }
+ }
+ var yyn6 bool
+ if x.IngressRuleValue.HTTP == nil {
+ yyn6 = true
+ goto LABEL6
+ }
+ LABEL6:
+ if yyr2 || yy2arr2 {
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("http"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if yyn6 {
+ r.EncodeNil()
+ } else {
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "host":
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ case "http":
+ if x.IngressRuleValue.HTTP == nil {
+ x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue)
+ }
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Host = ""
+ } else {
+ x.Host = string(r.DecodeString())
+ }
+ if x.IngressRuleValue.HTTP == nil {
+ x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.HTTP != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("http"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HTTP == nil {
+ r.EncodeNil()
+ } else {
+ x.HTTP.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "http":
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj5 int
+ var yyb5 bool
+ var yyhl5 bool = l >= 0
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.HTTP != nil {
+ x.HTTP = nil
+ }
+ } else {
+ if x.HTTP == nil {
+ x.HTTP = new(HTTPIngressRuleValue)
+ }
+ x.HTTP.CodecDecodeSelf(d)
+ }
+ for {
+ yyj5++
+ if yyhl5 {
+ yyb5 = yyj5 > l
+ } else {
+ yyb5 = r.CheckBreak()
+ }
+ if yyb5 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj5-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Paths == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("paths"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Paths == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "paths":
+ if r.TryDecodeAsNil() {
+ x.Paths = nil
+ } else {
+ yyv4 := &x.Paths
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Paths = nil
+ } else {
+ yyv7 := &x.Paths
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Path != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("path"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Path))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.Backend
+ yy7.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("backend"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.Backend
+ yy9.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "path":
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ case "backend":
+ if r.TryDecodeAsNil() {
+ x.Backend = IngressBackend{}
+ } else {
+ yyv5 := &x.Backend
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Path = ""
+ } else {
+ x.Path = string(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Backend = IngressBackend{}
+ } else {
+ yyv8 := &x.Backend
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("serviceName"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy7 := &x.ServicePort
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("servicePort"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.ServicePort
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "serviceName":
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ case "servicePort":
+ if r.TryDecodeAsNil() {
+ x.ServicePort = pkg5_intstr.IntOrString{}
+ } else {
+ yyv5 := &x.ServicePort
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv5) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv5)
+ } else {
+ z.DecFallback(yyv5, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServiceName = ""
+ } else {
+ x.ServiceName = string(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ServicePort = pkg5_intstr.IntOrString{}
+ } else {
+ yyv9 := &x.ServicePort
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Export))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("export"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Export))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Exact))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("exact"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Exact))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "export":
+ if r.TryDecodeAsNil() {
+ x.Export = false
+ } else {
+ x.Export = bool(r.DecodeBool())
+ }
+ case "exact":
+ if r.TryDecodeAsNil() {
+ x.Exact = false
+ } else {
+ x.Exact = bool(r.DecodeBool())
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Export = false
+ } else {
+ x.Export = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Exact = false
+ } else {
+ x.Exact = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [7]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.LabelSelector != ""
+ yyq2[1] = x.FieldSelector != ""
+ yyq2[2] = x.Watch != false
+ yyq2[3] = x.ResourceVersion != ""
+ yyq2[4] = x.TimeoutSeconds != nil
+ yyq2[5] = x.Kind != ""
+ yyq2[6] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(7)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("labelSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fieldSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Watch))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("watch"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Watch))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.TimeoutSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy16 := *x.TimeoutSeconds
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeInt(int64(yy16))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.TimeoutSeconds == nil {
+ r.EncodeNil()
+ } else {
+ yy18 := *x.TimeoutSeconds
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeInt(int64(yy18))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "labelSelector":
+ if r.TryDecodeAsNil() {
+ x.LabelSelector = ""
+ } else {
+ x.LabelSelector = string(r.DecodeString())
+ }
+ case "fieldSelector":
+ if r.TryDecodeAsNil() {
+ x.FieldSelector = ""
+ } else {
+ x.FieldSelector = string(r.DecodeString())
+ }
+ case "watch":
+ if r.TryDecodeAsNil() {
+ x.Watch = false
+ } else {
+ x.Watch = bool(r.DecodeBool())
+ }
+ case "resourceVersion":
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ case "timeoutSeconds":
+ if r.TryDecodeAsNil() {
+ if x.TimeoutSeconds != nil {
+ x.TimeoutSeconds = nil
+ }
+ } else {
+ if x.TimeoutSeconds == nil {
+ x.TimeoutSeconds = new(int64)
+ }
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj12 int
+ var yyb12 bool
+ var yyhl12 bool = l >= 0
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.LabelSelector = ""
+ } else {
+ x.LabelSelector = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FieldSelector = ""
+ } else {
+ x.FieldSelector = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Watch = false
+ } else {
+ x.Watch = bool(r.DecodeBool())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceVersion = ""
+ } else {
+ x.ResourceVersion = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.TimeoutSeconds != nil {
+ x.TimeoutSeconds = nil
+ }
+ } else {
+ if x.TimeoutSeconds == nil {
+ x.TimeoutSeconds = new(int64)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj12++
+ if yyhl12 {
+ yyb12 = yyj12 > l
+ } else {
+ yyb12 = r.CheckBreak()
+ }
+ if yyb12 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj12-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
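+// CodecEncodeSelf for LabelSelector treats both fields as optional: the yyq2
+// flags are set only when matchLabels / matchExpressions are non-empty, so in
+// map form absent fields are skipped entirely and in array form they are
+// written as nil slots.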
+func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.MatchLabels) != 0
+ yyq2[1] = len(x.MatchExpressions) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.MatchLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.MatchLabels, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchLabels"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchLabels == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncMapStringStringV(x.MatchLabels, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("matchExpressions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.MatchExpressions == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "matchLabels":
+ if r.TryDecodeAsNil() {
+ x.MatchLabels = nil
+ } else {
+ yyv4 := &x.MatchLabels
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv4, false, d)
+ }
+ }
+ case "matchExpressions":
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv6 := &x.MatchExpressions
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchLabels = nil
+ } else {
+ yyv9 := &x.MatchLabels
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecMapStringStringX(yyv9, false, d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MatchExpressions = nil
+ } else {
+ yyv11 := &x.MatchExpressions
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
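+// CodecEncodeSelf for LabelSelectorRequirement always emits key and operator
+// (the map length yynn2 starts at 2); values is optional and only written when
+// the slice is non-empty, via the string-slice helper EncSliceStringV.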
+func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[2] = len(x.Values) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Key))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Operator.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("operator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Operator.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("values"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Values == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Values, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "operator":
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = LabelSelectorOperator(r.DecodeString())
+ }
+ case "values":
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv6 := &x.Values
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv6, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Operator = ""
+ } else {
+ x.Operator = LabelSelectorOperator(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Values = nil
+ } else {
+ yyv11 := &x.Values
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv11, false, d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
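+// LabelSelectorOperator is a string-backed type, so its codec methods reduce
+// to encoding/decoding the underlying string once the extension hook has had a
+// chance to handle it.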
+func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
+func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicaSetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicaSetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = ReplicaSetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = ReplicaSetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
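+// ReplicaSetList handles its embedded ListMeta through the reflection fallback
+// (EncFallback/DecFallback) after checking for registered extensions, while
+// the Items slice goes through the generated encSliceReplicaSet /
+// decSliceReplicaSet helpers and is always written; kind and apiVersion are
+// emitted only when non-empty in map form.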
+func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceReplicaSet((*[]ReplicaSet)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceReplicaSet((*[]ReplicaSet)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
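+// In ReplicaSetSpec, Replicas (*int32) and Selector (*LabelSelector) are
+// pointer fields: a nil pointer is encoded as an absent map key (or a nil
+// array slot), and on decode an explicit null clears the pointer while a
+// concrete value allocates the target before filling it, keeping
+// "unspecified" distinguishable from an explicit zero value.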
+func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Replicas != nil
+ yyq2[1] = x.Selector != nil
+ yyq2[2] = true
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Replicas
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(yy4))
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Replicas == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Replicas
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(yy6))
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ x.Selector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy12 := &x.Template
+ yy12.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("template"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.Template
+ yy14.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ case "template":
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv7 := &x.Template
+ yyv7.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Replicas != nil {
+ x.Replicas = nil
+ }
+ } else {
+ if x.Replicas == nil {
+ x.Replicas = new(int32)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ *((*int32)(x.Replicas)) = int32(r.DecodeInt(32))
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(LabelSelector)
+ }
+ x.Selector.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Template = pkg2_v1.PodTemplateSpec{}
+ } else {
+ yyv12 := &x.Template
+ yyv12.CodecDecodeSelf(d)
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
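+// ReplicaSetStatus always writes "replicas"; fullyLabeledReplicas and
+// observedGeneration are flagged in yyq2 and emitted only when non-zero, so
+// the map length starts at 1 before the optional flags are counted.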
+func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.FullyLabeledReplicas != 0
+ yyq2[2] = x.ObservedGeneration != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("replicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Replicas))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FullyLabeledReplicas))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ObservedGeneration))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "replicas":
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ case "fullyLabeledReplicas":
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ case "observedGeneration":
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Replicas = 0
+ } else {
+ x.Replicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FullyLabeledReplicas = 0
+ } else {
+ x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObservedGeneration = 0
+ } else {
+ x.ObservedGeneration = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
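+// PodSecurityPolicy delegates metadata and spec to their own CodecEncodeSelf /
+// CodecDecodeSelf methods, so nested structs are encoded recursively; kind and
+// apiVersion follow the usual optional-string pattern and are written only
+// when non-empty in map form.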
+func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSecurityPolicySpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodSecurityPolicySpec{}
+ } else {
+ yyv10 := &x.Spec
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [14]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Privileged != false
+ yyq2[1] = len(x.DefaultAddCapabilities) != 0
+ yyq2[2] = len(x.RequiredDropCapabilities) != 0
+ yyq2[3] = len(x.AllowedCapabilities) != 0
+ yyq2[4] = len(x.Volumes) != 0
+ yyq2[5] = x.HostNetwork != false
+ yyq2[6] = len(x.HostPorts) != 0
+ yyq2[7] = x.HostPID != false
+ yyq2[8] = x.HostIPC != false
+ yyq2[13] = x.ReadOnlyRootFilesystem != false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(14)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Privileged))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("privileged"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Privileged))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.DefaultAddCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.DefaultAddCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ if x.RequiredDropCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.RequiredDropCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ if x.AllowedCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.AllowedCapabilities == nil {
+ r.EncodeNil()
+ } else {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.encSliceFSType(([]FSType)(x.Volumes), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("volumes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Volumes == nil {
+ r.EncodeNil()
+ } else {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ h.encSliceFSType(([]FSType)(x.Volumes), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostNetwork"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostNetwork))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[6] {
+ if x.HostPorts == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPorts"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.HostPorts == nil {
+ r.EncodeNil()
+ } else {
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[7] {
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostPID"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostPID))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[8] {
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[8] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("hostIPC"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeBool(bool(x.HostIPC))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy31 := &x.SELinux
+ yy31.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinux"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy33 := &x.SELinux
+ yy33.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy36 := &x.RunAsUser
+ yy36.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("runAsUser"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy38 := &x.RunAsUser
+ yy38.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy41 := &x.SupplementalGroups
+ yy41.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy43 := &x.SupplementalGroups
+ yy43.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy46 := &x.FSGroup
+ yy46.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("fsGroup"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy48 := &x.FSGroup
+ yy48.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[13] {
+ yym51 := z.EncBinary()
+ _ = yym51
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnlyRootFilesystem))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq2[13] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ReadOnlyRootFilesystem))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
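+// CodecDecodeSelf decodes a PodSecurityPolicySpec from either a map or an
+// array container, dispatching to the per-container helpers below; any other
+// container type panics, since structs only round-trip as maps or arrays.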
+func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
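+// codecDecodeSelfFromMap consumes l key/value pairs (or reads until the
+// container break when l < 0), matching each JSON key to its field and
+// resetting the field to its zero value when the encoded value is nil.
+// Unknown keys are skipped via DecStructFieldNotFound.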
+func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "privileged":
+ if r.TryDecodeAsNil() {
+ x.Privileged = false
+ } else {
+ x.Privileged = bool(r.DecodeBool())
+ }
+ case "defaultAddCapabilities":
+ if r.TryDecodeAsNil() {
+ x.DefaultAddCapabilities = nil
+ } else {
+ yyv5 := &x.DefaultAddCapabilities
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv5), d)
+ }
+ }
+ case "requiredDropCapabilities":
+ if r.TryDecodeAsNil() {
+ x.RequiredDropCapabilities = nil
+ } else {
+ yyv7 := &x.RequiredDropCapabilities
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv7), d)
+ }
+ }
+ case "allowedCapabilities":
+ if r.TryDecodeAsNil() {
+ x.AllowedCapabilities = nil
+ } else {
+ yyv9 := &x.AllowedCapabilities
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv9), d)
+ }
+ }
+ case "volumes":
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv11 := &x.Volumes
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceFSType((*[]FSType)(yyv11), d)
+ }
+ }
+ case "hostNetwork":
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ case "hostPorts":
+ if r.TryDecodeAsNil() {
+ x.HostPorts = nil
+ } else {
+ yyv14 := &x.HostPorts
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ h.decSliceHostPortRange((*[]HostPortRange)(yyv14), d)
+ }
+ }
+ case "hostPID":
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ case "hostIPC":
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ case "seLinux":
+ if r.TryDecodeAsNil() {
+ x.SELinux = SELinuxStrategyOptions{}
+ } else {
+ yyv18 := &x.SELinux
+ yyv18.CodecDecodeSelf(d)
+ }
+ case "runAsUser":
+ if r.TryDecodeAsNil() {
+ x.RunAsUser = RunAsUserStrategyOptions{}
+ } else {
+ yyv19 := &x.RunAsUser
+ yyv19.CodecDecodeSelf(d)
+ }
+ case "supplementalGroups":
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = SupplementalGroupsStrategyOptions{}
+ } else {
+ yyv20 := &x.SupplementalGroups
+ yyv20.CodecDecodeSelf(d)
+ }
+ case "fsGroup":
+ if r.TryDecodeAsNil() {
+ x.FSGroup = FSGroupStrategyOptions{}
+ } else {
+ yyv21 := &x.FSGroup
+ yyv21.CodecDecodeSelf(d)
+ }
+ case "readOnlyRootFilesystem":
+ if r.TryDecodeAsNil() {
+ x.ReadOnlyRootFilesystem = false
+ } else {
+ x.ReadOnlyRootFilesystem = bool(r.DecodeBool())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
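+// codecDecodeSelfFromArray decodes the same fields positionally, in struct
+// declaration order; a shorter array leaves the remaining fields untouched,
+// and any extra trailing elements are drained through DecStructFieldNotFound.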
+func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj23 int
+ var yyb23 bool
+ var yyhl23 bool = l >= 0
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Privileged = false
+ } else {
+ x.Privileged = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DefaultAddCapabilities = nil
+ } else {
+ yyv25 := &x.DefaultAddCapabilities
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv25), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RequiredDropCapabilities = nil
+ } else {
+ yyv27 := &x.RequiredDropCapabilities
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv27), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AllowedCapabilities = nil
+ } else {
+ yyv29 := &x.AllowedCapabilities
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv29), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Volumes = nil
+ } else {
+ yyv31 := &x.Volumes
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ h.decSliceFSType((*[]FSType)(yyv31), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostNetwork = false
+ } else {
+ x.HostNetwork = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPorts = nil
+ } else {
+ yyv34 := &x.HostPorts
+ yym35 := z.DecBinary()
+ _ = yym35
+ if false {
+ } else {
+ h.decSliceHostPortRange((*[]HostPortRange)(yyv34), d)
+ }
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostPID = false
+ } else {
+ x.HostPID = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.HostIPC = false
+ } else {
+ x.HostIPC = bool(r.DecodeBool())
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SELinux = SELinuxStrategyOptions{}
+ } else {
+ yyv38 := &x.SELinux
+ yyv38.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RunAsUser = RunAsUserStrategyOptions{}
+ } else {
+ yyv39 := &x.RunAsUser
+ yyv39.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.SupplementalGroups = SupplementalGroupsStrategyOptions{}
+ } else {
+ yyv40 := &x.SupplementalGroups
+ yyv40.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.FSGroup = FSGroupStrategyOptions{}
+ } else {
+ yyv41 := &x.FSGroup
+ yyv41.CodecDecodeSelf(d)
+ }
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ReadOnlyRootFilesystem = false
+ } else {
+ x.ReadOnlyRootFilesystem = bool(r.DecodeBool())
+ }
+ for {
+ yyj23++
+ if yyhl23 {
+ yyb23 = yyj23 > l
+ } else {
+ yyb23 = r.CheckBreak()
+ }
+ if yyb23 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj23-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
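+// FSType and the other string-based types below encode and decode as plain
+// UTF-8 strings unless a registered codec extension claims the type first.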
+func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
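+// CodecEncodeSelf writes a HostPortRange either as a fixed two-element array
+// (when StructToArray is set on the handle) or as a map with "min" and "max"
+// keys; both fields are always emitted.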
+func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("min"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("max"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
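+// CodecDecodeSelf accepts the same two shapes on the way back in: a map keyed
+// by "min"/"max" or a positional two-element array.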
+func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "min":
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int32(r.DecodeInt(32))
+ }
+ case "max":
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int32(r.DecodeInt(32))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
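+// CodecEncodeSelf for SELinuxStrategyOptions always emits "rule" and treats
+// "seLinuxOptions" as optional (yyq2[1]), writing it only when the pointer is
+// non-nil; in array form the optional slot is encoded as nil instead.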
+func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.SELinuxOptions != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.SELinuxOptions == nil {
+ r.EncodeNil()
+ } else {
+ x.SELinuxOptions.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SELinuxStrategy(r.DecodeString())
+ }
+ case "seLinuxOptions":
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(pkg2_v1.SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SELinuxStrategy(r.DecodeString())
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.SELinuxOptions != nil {
+ x.SELinuxOptions = nil
+ }
+ } else {
+ if x.SELinuxOptions == nil {
+ x.SELinuxOptions = new(pkg2_v1.SELinuxOptions)
+ }
+ x.SELinuxOptions.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
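+// RunAsUserStrategyOptions encodes "rule" unconditionally and "ranges" only
+// when the slice is non-empty (omitempty-style).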
+func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.Ranges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ranges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = RunAsUserStrategy(r.DecodeString())
+ }
+ case "ranges":
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv5 := &x.Ranges
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = RunAsUserStrategy(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv9 := &x.Ranges
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
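+// IDRange is a simple min/max pair of int64 values; both fields are always
+// written, as array elements or as the "min" and "max" map keys.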
+func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("min"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Min))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("max"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Max))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "min":
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int64(r.DecodeInt(64))
+ }
+ case "max":
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int64(r.DecodeInt(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Min = 0
+ } else {
+ x.Min = int64(r.DecodeInt(64))
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Max = 0
+ } else {
+ x.Max = int64(r.DecodeInt(64))
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
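+// FSGroupStrategyOptions treats both "rule" and "ranges" as optional: "rule"
+// is written only when non-empty and "ranges" only when the slice has
+// elements, so a zero-valued struct encodes as an empty map.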
+func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Rule != ""
+ yyq2[1] = len(x.Ranges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ranges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = FSGroupStrategyType(r.DecodeString())
+ }
+ case "ranges":
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv5 := &x.Ranges
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = FSGroupStrategyType(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv9 := &x.Ranges
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
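+// SupplementalGroupsStrategyOptions follows the same shape as
+// FSGroupStrategyOptions above: an optional "rule" and an optional "ranges"
+// slice of IDRange.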
+func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Rule != ""
+ yyq2[1] = len(x.Ranges) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ x.Rule.CodecEncodeSelf(e)
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rule"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ x.Rule.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ranges"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ranges == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceIDRange(([]IDRange)(x.Ranges), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "rule":
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SupplementalGroupsStrategyType(r.DecodeString())
+ }
+ case "ranges":
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv5 := &x.Ranges
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rule = ""
+ } else {
+ x.Rule = SupplementalGroupsStrategyType(r.DecodeString())
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ranges = nil
+ } else {
+ yyv9 := &x.Ranges
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceIDRange((*[]IDRange)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x))
+ }
+}
+
+func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*string)(x)) = r.DecodeString()
+ }
+}
+
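+// PodSecurityPolicyList encodes its list "metadata" (unversioned.ListMeta,
+// via the generic fallback), the "items" slice, and the optional "kind" and
+// "apiVersion" strings, which are written only when non-empty.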
+func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
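+// NetworkPolicy always emits its "metadata" (v1.ObjectMeta) and "spec"
+// (NetworkPolicySpec), with "kind" and "apiVersion" again written only when
+// set.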
+func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = NetworkPolicySpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg2_v1.ObjectMeta{}
+ } else {
+ yyv9 := &x.ObjectMeta
+ yyv9.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = NetworkPolicySpec{}
+ } else {
+ yyv10 := &x.Spec
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = len(x.Ingress) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy4 := &x.PodSelector
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.PodSelector
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ingress"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ingress == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "podSelector":
+ if r.TryDecodeAsNil() {
+ x.PodSelector = LabelSelector{}
+ } else {
+ yyv4 := &x.PodSelector
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "ingress":
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv5 := &x.Ingress
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodSelector = LabelSelector{}
+ } else {
+ yyv8 := &x.PodSelector
+ yyv8.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ingress = nil
+ } else {
+ yyv9 := &x.Ingress
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = len(x.Ports) != 0
+ yyq2[1] = len(x.From) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("ports"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Ports == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.From == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("from"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.From == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "ports":
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv4 := &x.Ports
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d)
+ }
+ }
+ case "from":
+ if r.TryDecodeAsNil() {
+ x.From = nil
+ } else {
+ yyv6 := &x.From
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Ports = nil
+ } else {
+ yyv9 := &x.Ports
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.From = nil
+ } else {
+ yyv11 := &x.From
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.Protocol != nil
+ yyq2[1] = x.Port != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.Protocol == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Protocol
+ yysf5 := &yy4
+ yysf5.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("protocol"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Protocol == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Protocol
+ yysf7 := &yy6
+ yysf7.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Port == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Port) {
+ } else if !yym9 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.Port)
+ } else {
+ z.EncFallback(x.Port)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("port"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Port == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Port) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.Port)
+ } else {
+ z.EncFallback(x.Port)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "protocol":
+ if r.TryDecodeAsNil() {
+ if x.Protocol != nil {
+ x.Protocol = nil
+ }
+ } else {
+ if x.Protocol == nil {
+ x.Protocol = new(pkg2_v1.Protocol)
+ }
+ x.Protocol.CodecDecodeSelf(d)
+ }
+ case "port":
+ if r.TryDecodeAsNil() {
+ if x.Port != nil {
+ x.Port = nil
+ }
+ } else {
+ if x.Port == nil {
+ x.Port = new(pkg5_intstr.IntOrString)
+ }
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Port) {
+ } else if !yym6 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.Port)
+ } else {
+ z.DecFallback(x.Port, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Protocol != nil {
+ x.Protocol = nil
+ }
+ } else {
+ if x.Protocol == nil {
+ x.Protocol = new(pkg2_v1.Protocol)
+ }
+ x.Protocol.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Port != nil {
+ x.Port = nil
+ }
+ } else {
+ if x.Port == nil {
+ x.Port = new(pkg5_intstr.IntOrString)
+ }
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Port) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.Port)
+ } else {
+ z.DecFallback(x.Port, false)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = x.PodSelector != nil
+ yyq2[1] = x.NamespaceSelector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ if x.PodSelector == nil {
+ r.EncodeNil()
+ } else {
+ x.PodSelector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("podSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.PodSelector == nil {
+ r.EncodeNil()
+ } else {
+ x.PodSelector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.NamespaceSelector == nil {
+ r.EncodeNil()
+ } else {
+ x.NamespaceSelector.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NamespaceSelector == nil {
+ r.EncodeNil()
+ } else {
+ x.NamespaceSelector.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "podSelector":
+ if r.TryDecodeAsNil() {
+ if x.PodSelector != nil {
+ x.PodSelector = nil
+ }
+ } else {
+ if x.PodSelector == nil {
+ x.PodSelector = new(LabelSelector)
+ }
+ x.PodSelector.CodecDecodeSelf(d)
+ }
+ case "namespaceSelector":
+ if r.TryDecodeAsNil() {
+ if x.NamespaceSelector != nil {
+ x.NamespaceSelector = nil
+ }
+ } else {
+ if x.NamespaceSelector == nil {
+ x.NamespaceSelector = new(LabelSelector)
+ }
+ x.NamespaceSelector.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.PodSelector != nil {
+ x.PodSelector = nil
+ }
+ } else {
+ if x.PodSelector == nil {
+ x.PodSelector = new(LabelSelector)
+ }
+ x.PodSelector.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.NamespaceSelector != nil {
+ x.NamespaceSelector = nil
+ }
+ } else {
+ if x.NamespaceSelector == nil {
+ x.NamespaceSelector = new(LabelSelector)
+ }
+ x.NamespaceSelector.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg1_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CustomMetricTarget{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CustomMetricTarget, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CustomMetricTarget, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricTarget{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CustomMetricTarget{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricTarget{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricTarget{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CustomMetricTarget{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []CustomMetricCurrentStatus{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]CustomMetricCurrentStatus, yyrl1)
+ }
+ } else {
+ yyv1 = make([]CustomMetricCurrentStatus, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricCurrentStatus{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, CustomMetricCurrentStatus{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricCurrentStatus{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = CustomMetricCurrentStatus{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []CustomMetricCurrentStatus{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HorizontalPodAutoscaler{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HorizontalPodAutoscaler, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HorizontalPodAutoscaler, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HorizontalPodAutoscaler{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HorizontalPodAutoscaler{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HorizontalPodAutoscaler{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []APIVersion{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]APIVersion, yyrl1)
+ }
+ } else {
+ yyv1 = make([]APIVersion, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = APIVersion{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, APIVersion{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = APIVersion{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = APIVersion{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []APIVersion{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ThirdPartyResource{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ThirdPartyResource, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ThirdPartyResource, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResource{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ThirdPartyResource{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResource{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResource{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ThirdPartyResource{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Deployment{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Deployment, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Deployment, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Deployment{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Deployment{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Deployment{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Deployment{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Deployment{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []DaemonSet{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]DaemonSet, yyrl1)
+ }
+ } else {
+ yyv1 = make([]DaemonSet, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DaemonSet{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, DaemonSet{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DaemonSet{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = DaemonSet{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []DaemonSet{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ThirdPartyResourceData{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ThirdPartyResourceData, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ThirdPartyResourceData, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResourceData{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ThirdPartyResourceData{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResourceData{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ThirdPartyResourceData{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ThirdPartyResourceData{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Job, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Job{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Job{}) // var yyz1 Job
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Job{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Job{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ } else {
+ yyv1 = make([]JobCondition, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, JobCondition{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = JobCondition{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []JobCondition{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Ingress{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Ingress, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Ingress, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Ingress{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Ingress{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Ingress{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Ingress{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Ingress{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []IngressTLS{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]IngressTLS, yyrl1)
+ }
+ } else {
+ yyv1 = make([]IngressTLS, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressTLS{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, IngressTLS{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressTLS{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressTLS{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []IngressTLS{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []IngressRule{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]IngressRule, yyrl1)
+ }
+ } else {
+ yyv1 = make([]IngressRule, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressRule{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, IngressRule{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressRule{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IngressRule{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []IngressRule{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HTTPIngressPath{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HTTPIngressPath, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HTTPIngressPath, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPIngressPath{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HTTPIngressPath{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPIngressPath{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HTTPIngressPath{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HTTPIngressPath{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LabelSelectorRequirement{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LabelSelectorRequirement, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LabelSelectorRequirement, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LabelSelectorRequirement{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LabelSelectorRequirement{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LabelSelectorRequirement{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ReplicaSet{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 728)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ReplicaSet, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ReplicaSet, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicaSet{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ReplicaSet{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicaSet{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ReplicaSet{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ReplicaSet{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yysf2 := &yyv1
+ yysf2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []pkg2_v1.Capability{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]pkg2_v1.Capability, yyrl1)
+ }
+ } else {
+ yyv1 = make([]pkg2_v1.Capability, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 pkg2_v1.Capability
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []pkg2_v1.Capability{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yyv1.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []FSType{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]FSType, yyrl1)
+ }
+ } else {
+ yyv1 = make([]FSType, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FSType(r.DecodeString())
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, "")
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FSType(r.DecodeString())
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, "") // var yyz1 FSType
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ""
+ } else {
+ yyv1[yyj1] = FSType(r.DecodeString())
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []FSType{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []HostPortRange{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]HostPortRange, yyrl1)
+ }
+ } else {
+ yyv1 = make([]HostPortRange, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HostPortRange{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, HostPortRange{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HostPortRange{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = HostPortRange{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []HostPortRange{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []IDRange{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]IDRange, yyrl1)
+ }
+ } else {
+ yyv1 = make([]IDRange, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IDRange{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, IDRange{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IDRange{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = IDRange{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []IDRange{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodSecurityPolicy{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 536)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodSecurityPolicy, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodSecurityPolicy, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodSecurityPolicy{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodSecurityPolicy{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodSecurityPolicy{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodSecurityPolicy{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodSecurityPolicy{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicyIngressRule{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicyIngressRule, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicyIngressRule, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyIngressRule{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicyIngressRule{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyIngressRule{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyIngressRule{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicyIngressRule{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicyPort{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicyPort, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicyPort, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPort{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicyPort{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPort{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPort{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicyPort{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicyPeer{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicyPeer, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicyPeer, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPeer{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicyPeer{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPeer{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicyPeer{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicyPeer{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []NetworkPolicy{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]NetworkPolicy, yyrl1)
+ }
+ } else {
+ yyv1 = make([]NetworkPolicy, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicy{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, NetworkPolicy{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicy{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = NetworkPolicy{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []NetworkPolicy{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
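
The generated encSlice*/decSlice* helpers above are not called directly by application code; the ugorji go-codec runtime dispatches to them through the codecSelfer interface whenever an Encoder or Decoder handles one of these slice types. A minimal round-trip sketch, using a stand-in Item struct rather than the real API types (the names below are assumptions for illustration only):

    package main

    import (
    	"fmt"

    	"github.com/ugorji/go/codec"
    )

    // Item stands in for a generated API type such as Job or Ingress.
    type Item struct {
    	Name string
    }

    func main() {
    	var jh codec.JsonHandle

    	in := []Item{{Name: "a"}, {Name: "b"}}

    	// Encode the slice; for the real API types the runtime would take the
    	// generated CodecEncodeSelf fast path instead of reflection.
    	var buf []byte
    	if err := codec.NewEncoderBytes(&buf, &jh).Encode(in); err != nil {
    		panic(err)
    	}

    	// Decoding mirrors what decSliceJob and friends do via CodecDecodeSelf.
    	var out []Item
    	if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out)
    }
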
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go
new file mode 100644
index 0000000..55719fd
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go
@@ -0,0 +1,1198 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+// describes the attributes of a scale subresource
+type ScaleSpec struct {
+ // desired number of instances for the scaled object.
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// represents the current status of a scale subresource.
+type ScaleStatus struct {
+ // actual number of observed instances of the scaled object.
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+ // label selector for pods that should match the replicas count. This is a serialized
+ // version of both map-based and more expressive set-based selectors. This is done to
+ // avoid introspection in the clients. The string will be in the same format as the
+ // query-param syntax. If the target type only supports map-based selectors, both this
+ // field and map-based selector field are populated.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
+}
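+
+// Example (illustrative, assumed labels): a map-based selector serializes as
+// "app=guestbook,tier=frontend", while set-based requirements use the same
+// query-param syntax, e.g. "app=guestbook,tier in (frontend,backend)".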
+
+// +genclient=true
+// +noMethods=true
+
+// represents a scaling request for a resource.
+type Scale struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+ Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// Dummy definition
+type ReplicationControllerDummy struct {
+ unversioned.TypeMeta `json:",inline"`
+}
+
+// SubresourceReference contains enough information to let you inspect or modify the referred subresource.
+type SubresourceReference struct {
+ // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+ // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+ Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+ // API version of the referent
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+ // Subresource name of the referent
+ Subresource string `json:"subresource,omitempty" protobuf:"bytes,4,opt,name=subresource"`
+}
+
+type CPUTargetUtilization struct {
+ // fraction of the requested CPU that should be utilized/used,
+ // e.g. 70 means that 70% of the requested CPU should be in use.
+ TargetPercentage int32 `json:"targetPercentage" protobuf:"varint,1,opt,name=targetPercentage"`
+}
+
+// Alpha-level support for Custom Metrics in HPA (as annotations).
+type CustomMetricTarget struct {
+ // Custom Metric name.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Custom Metric value (average).
+ TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
+
+type CustomMetricTargetList struct {
+ Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"`
+}
+
+type CustomMetricCurrentStatus struct {
+ // Custom Metric name.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Custom Metric value (average).
+ CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
+
+type CustomMetricCurrentStatusList struct {
+ Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"`
+}
+
+// specification of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerSpec struct {
+ // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status,
+ // and will set the desired number of pods by modifying its spec.
+ ScaleRef SubresourceReference `json:"scaleRef" protobuf:"bytes,1,opt,name=scaleRef"`
+ // lower limit for the number of pods that can be set by the autoscaler, default 1.
+ MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
+ // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
+ // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+ // if not specified it defaults to the target CPU utilization at 80% of the requested resources.
+ CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty" protobuf:"bytes,4,opt,name=cpuUtilization"`
+}
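+
+// Illustrative sketch (assumed names and values, not part of the API): a spec that
+// scales the "frontend" ReplicationController between 2 and 10 replicas, targeting
+// 80% of requested CPU.
+//
+//	minReplicas := int32(2)
+//	spec := HorizontalPodAutoscalerSpec{
+//		ScaleRef:       SubresourceReference{Kind: "ReplicationController", Name: "frontend", Subresource: "scale"},
+//		MinReplicas:    &minReplicas,
+//		MaxReplicas:    10,
+//		CPUUtilization: &CPUTargetUtilization{TargetPercentage: 80},
+//	}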
+
+// current status of a horizontal pod autoscaler
+type HorizontalPodAutoscalerStatus struct {
+ // most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // last time the HorizontalPodAutoscaler scaled the number of pods;
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
+
+ // current number of replicas of pods managed by this autoscaler.
+ CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
+
+ // desired number of replicas of pods managed by this autoscaler.
+ DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
+
+ // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+ // e.g. 70 means that an average pod is using now 70% of its requested CPU.
+ CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"`
+}
+
+// +genclient=true
+
+// configuration of a horizontal pod autoscaler.
+type HorizontalPodAutoscaler struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // current information about the autoscaler.
+ Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // list of horizontal pod autoscaler objects.
+ Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// A ThirdPartyResource is a generic representation of a resource; it is used by add-ons and plugins to add new resource
+// types to the API. It consists of one or more Versions of the API.
+type ThirdPartyResource struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Standard object metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Description is the description of this object.
+ Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"`
+
+ // Versions are versions for this third party object
+ Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"`
+}
+
+// ThirdPartyResourceList is a list of ThirdPartyResources.
+type ThirdPartyResourceList struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // Standard list metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of ThirdPartyResources.
+ Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// An APIVersion represents a single concrete version of an object model.
+type APIVersion struct {
+ // Name of this version (e.g. 'v1').
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+}
+
+// An internal object, used for versioned storage in etcd. Not exposed to the end user.
+type ThirdPartyResourceData struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Data is the raw JSON data for this object.
+ Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
+}
+
+// +genclient=true
+
+// Deployment enables declarative updates for Pods and ReplicaSets.
+type Deployment struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the Deployment.
+ Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Most recently observed status of the Deployment.
+ Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
+type DeploymentSpec struct {
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template describes the pods that will be created.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+
+ // Indicates that the deployment is paused and will not be processed by the
+ // deployment controller.
+ Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+ // The config this deployment is rolling back to. Will be cleared after rollback is done.
+ RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"`
+}
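+
+// Illustrative sketch (assumed values, not part of the API): a spec asking for three
+// replicas of pods labelled app=nginx, replaced via a rolling update that keeps at
+// most one pod unavailable; Template would carry the actual pod definition.
+//
+//	replicas := int32(3)
+//	maxUnavailable := intstr.FromInt(1)
+//	spec := DeploymentSpec{
+//		Replicas: &replicas,
+//		Selector: &LabelSelector{MatchLabels: map[string]string{"app": "nginx"}},
+//		Strategy: DeploymentStrategy{
+//			Type:          RollingUpdateDeploymentStrategyType,
+//			RollingUpdate: &RollingUpdateDeployment{MaxUnavailable: &maxUnavailable},
+//		},
+//	}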
+
+// DeploymentRollback stores the information required to rollback a deployment.
+type DeploymentRollback struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Required: This must match the Name of a deployment.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // The annotations to be updated on the deployment
+ UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
+ // The config of this deployment rollback.
+ RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"`
+}
+
+type RollbackConfig struct {
+ // The revision to roll back to. If set to 0, roll back to the last revision.
+ Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"`
+}
+
+const (
+ // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+ // to existing RCs (and label key that is added to its pods) to prevent the existing
+ // RCs from selecting new pods (and old pods from being selected by the new RC).
+ DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+ // Kill all existing pods before creating new ones.
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+ // Replace the old RCs by a new one using a rolling update, i.e. gradually scale down the old RCs and scale up the new one.
+ RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up.
+ // This can not be 0 if MaxSurge is 0.
+ // By default, a fixed value of 1 is used.
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old RC
+ // can be scaled down further, followed by scaling up the new RC, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // By default, a value of 1 is used.
+ // Example: when this is set to 30%, the new RC can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods does not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // the new RC can be scaled up further, ensuring that the total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
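+
+// Worked example (illustrative): with 10 desired pods and both values set to 30%,
+// maxUnavailable and maxSurge each resolve to 3 pods, so at least 7 pods stay
+// available and at most 13 old + new pods exist at any point during the update.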
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+ // The generation observed by the deployment controller.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
+
+ // Total number of unavailable pods targeted by this deployment.
+ UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+}
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Deployments.
+ Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
+/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out.
+type DaemonSetUpdateStrategy struct {
+ // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate.
+ Type DaemonSetUpdateStrategyType `json:"type,omitempty"`
+
+ // Rolling update config params. Present only if DaemonSetUpdateStrategy =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as DeploymentStrategy.RollingUpdate.
+ RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+ // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, 30% of the currently running DaemonSet
+ // pods can be stopped for an update at any given time. The update starts
+ // by stopping at most 30% of the currently running DaemonSet pods and then
+ // brings up new DaemonSet pods in their place. Once the new pods are ready,
+ // it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ // 70% of original number of DaemonSet pods are available at all times
+ // during the update.
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+
+ // Minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
+}
+*/
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+ // Selector is a label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on Pod template.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
+
+ // Template is the object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+ // TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
+ /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out.
+ // Update strategy to replace existing DaemonSet pods with new pods.
+ UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"`
+
+ // Label key that is added to DaemonSet pods to distinguish between old and
+ // new pod templates during DaemonSet update.
+ // Users can set this to an empty string to indicate that the system should
+ // not add any label. If unspecified, system uses
+ // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash").
+ // Value of this key is hash of DaemonSetSpec.PodTemplateSpec.
+ // No label is added if this is set to empty string.
+ UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"`
+ */
+}
+
+const (
+ // DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added
+ // to daemon set pods to distinguish between old and new pod templates during
+ // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information.
+ DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash"
+)
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+ // CurrentNumberScheduled is the number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
+ CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+ // NumberMisscheduled is the number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
+ NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+ // DesiredNumberScheduled is the total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
+ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+}
+
+// +genclient=true
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired behavior of this daemon set.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of daemon sets.
+ Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ThirdPartyResourceDataList is a list of ThirdPartyResourceData.
+type ThirdPartyResourceDataList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of ThirdPartyResourceData.
+ Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+
+// Job represents the configuration of a single job.
+type Job struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is a structure describing current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// JobList is a collection of jobs.
+type JobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Job.
+ Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// JobSpec describes what the job execution will look like.
+type JobSpec struct {
+
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; the value must be a positive integer.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+
+ // AutoSelector controls generation of pod labels and pod selectors.
+ // It was not present in the original extensions/v1beta1 Job definition, but exists
+ // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite
+ // meaning as, ManualSelector.
+ // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+ AutoSelector *bool `json:"autoSelector,omitempty" protobuf:"varint,5,opt,name=autoSelector"`
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
+}
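The Parallelism comment above describes the steady-state relationship between parallelism, completions, and succeeded pods: the job never needs more running pods than the work remaining. A minimal, self-contained sketch of that relationship (the desiredActive helper is hypothetical, not the real job controller):

package main

import "fmt"

// desiredActive returns the number of pods a job would keep running in
// steady state: the lesser of parallelism and the completions still owed.
func desiredActive(parallelism, completions, succeeded int32) int32 {
	remaining := completions - succeeded
	if remaining < parallelism {
		return remaining
	}
	return parallelism
}

func main() {
	// 10 completions requested, 7 already succeeded, parallelism 5:
	// only 3 pods still need to run.
	fmt.Println(desiredActive(5, 10, 7))
}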
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+
+ // Conditions represent the latest available observations of an object's current state.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+ Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+ // StartTime represents time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // CompletionTime represents time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
+
+ // Active is the number of actively running pods.
+ Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
+
+ // Failed is the number of pods which reached Phase Failed.
+ Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
+}
+
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+ // JobComplete means the job has completed its execution.
+ JobComplete JobConditionType = "Complete"
+ // JobFailed means the job has failed its execution.
+ JobFailed JobConditionType = "Failed"
+)
+
+// JobCondition describes current state of a job.
+type JobCondition struct {
+ // Type of job condition, Complete or Failed.
+ Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // Last time the condition was checked.
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// +genclient=true
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+type Ingress struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec is the desired state of the Ingress.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the current state of the Ingress.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// IngressList is a collection of Ingress.
+type IngressList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Ingress.
+ Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+type IngressSpec struct {
+ // A default backend capable of servicing requests that don't match any
+ // rule. At least one of 'backend' or 'rules' must be specified. This field
+ // is optional to allow the loadbalancer controller or defaulting logic to
+ // specify a global default.
+ Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"`
+
+ // TLS configuration. Currently the Ingress only supports a single TLS
+ // port, 443. If multiple members of this list specify different hosts, they
+ // will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"`
+
+ // A list of host rules used to configure the Ingress. If unspecified, or
+ // no rule matches, all traffic is sent to the default backend.
+ Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`
+ // TODO: Add the ability to specify load-balancer IP through claims
+}
+
+// IngressTLS describes the transport layer security associated with an Ingress.
+type IngressTLS struct {
+ // Hosts are a list of hosts included in the TLS certificate. The values in
+ // this list must match the name(s) used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"`
+ // SecretName is the name of the secret used to terminate SSL traffic on 443.
+ // Field is left optional to allow SSL routing based on SNI hostname alone.
+ // If the SNI host in a listener conflicts with the "Host" header field used
+ // by an IngressRule, the SNI host is used for termination and value of the
+ // Host header is used for routing.
+ SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"`
+ // TODO: Consider specifying different modes of termination, protocols etc.
+}
+
+// IngressStatus describe the current state of the Ingress.
+type IngressStatus struct {
+ // LoadBalancer contains the current status of the load-balancer.
+ LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+type IngressRule struct {
+ // Host is the fully qualified domain name of a network host, as defined
+ // by RFC 3986. Note the following deviations from the "host" part of the
+ // URI as defined in the RFC:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
+ // IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the IngressRuleValue.
+ // If the host is unspecified, the Ingress routes all traffic based on the
+ // specified IngressRuleValue.
+ Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to an HTTP catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
+ IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"`
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+type IngressRuleValue struct {
+ //TODO:
+ // 1. Consider renaming this resource and the associated rules so they
+ // aren't tied to Ingress. They can be used to route intra-cluster traffic.
+ // 2. Consider adding fields for ingress-type specific global options
+ // usable by a loadbalancer, like http keep-alive.
+
+ HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"`
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend,
+// where parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+type HTTPIngressRuleValue struct {
+ // A collection of paths that map requests to backends.
+ Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"`
+ // TODO: Consider adding fields for ingress-type specific global
+ // options usable by a loadbalancer, like http keep-alive.
+}
+
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
+// the path are forwarded to the backend.
+type HTTPIngressPath struct {
+ // Path is an extended POSIX regex as defined by IEEE Std 1003.1,
+ // (i.e. this follows the egrep/unix syntax, not the perl syntax)
+ // matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path"
+ // part of a URL as defined by RFC 3986. Paths must begin with
+ // a '/'. If unspecified, the path defaults to a catch all sending
+ // traffic to the backend.
+ Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+
+ // Backend defines the referenced service endpoint to which the traffic
+ // will be forwarded to.
+ Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"`
+}
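Since Path is documented above as an extended POSIX (egrep-style) regex rather than a Perl regex, a controller consuming it would compile it in POSIX mode. A minimal illustration using Go's standard regexp package, with a made-up pattern:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Compile the path in POSIX ERE mode, as the field comment describes.
	pathRegex := regexp.MustCompilePOSIX("^/api(/.*)?$")
	fmt.Println(pathRegex.MatchString("/api/v1/pods")) // true
	fmt.Println(pathRegex.MatchString("/healthz"))     // false
}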
+
+// IngressBackend describes all endpoints for a given service and port.
+type IngressBackend struct {
+ // Specifies the name of the referenced service.
+ ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"`
+
+ // Specifies the port of the referenced service.
+ ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"`
+}
+
+// ExportOptions is the query options to the standard REST get call.
+type ExportOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Should this value be exported. Export strips fields that a user can not specify.
+ Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
+ // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
+ Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
+}
+
+// ListOptions is the query options to a standard REST list call.
+type ListOptions struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // A selector to restrict the list of returned objects by their labels.
+ // Defaults to everything.
+ LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+ // A selector to restrict the list of returned objects by their fields.
+ // Defaults to everything.
+ FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
+ // Watch for changes to the described resources and return them as a stream of
+ // add, update, and remove notifications. Specify resourceVersion.
+ Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
+ // When specified with a watch call, shows changes that occur after that particular version of a resource.
+ // Defaults to changes from the beginning of history.
+ ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+ // Timeout for the list/watch call.
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+ // key is the label key that the selector applies to.
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
+ Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+ LabelSelectorOpIn LabelSelectorOperator = "In"
+ LabelSelectorOpNotIn LabelSelectorOperator = "NotIn"
+ LabelSelectorOpExists LabelSelectorOperator = "Exists"
+ LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
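A minimal sketch of the matchLabels/matchExpressions equivalence stated above: each matchLabels entry behaves like a requirement whose operator is In and whose values list holds a single value, with all requirements ANDed. The requirement type and asRequirements helper below are local stand-ins, not the API types:

package main

import "fmt"

// requirement is a local stand-in for LabelSelectorRequirement.
type requirement struct {
	Key      string
	Operator string
	Values   []string
}

// asRequirements rewrites a matchLabels map as the equivalent In-requirements.
func asRequirements(matchLabels map[string]string) []requirement {
	reqs := make([]requirement, 0, len(matchLabels))
	for k, v := range matchLabels {
		reqs = append(reqs, requirement{Key: k, Operator: "In", Values: []string{v}})
	}
	return reqs
}

func main() {
	fmt.Println(asRequirements(map[string]string{"app": "nginx"}))
}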
+
+// +genclient=true
+
+// ReplicaSet represents the configuration of a ReplicaSet.
+type ReplicaSet struct {
+ unversioned.TypeMeta `json:",inline"`
+
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+type ReplicaSetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of ReplicaSets.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
+ Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+type ReplicaSetSpec struct {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // If the selector is empty, it is defaulted to the labels present on the pod template.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+ Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+ Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+ // Replicas is the most recently observed number of replicas.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// Pod Security Policy governs the ability to make requests that affect the Security Context
+// that will be applied to a pod and container.
+type PodSecurityPolicy struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec defines the policy enforced.
+ Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// Pod Security Policy Spec defines the policy enforced.
+type PodSecurityPolicySpec struct {
+ // privileged determines if a pod can request to be run as privileged.
+ Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"`
+ // DefaultAddCapabilities is the default set of capabilities that will be added to the container
+ // unless the pod spec specifically drops the capability. You may not list a capability in both
+ // DefaultAddCapabilities and RequiredDropCapabilities.
+ DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"`
+ // RequiredDropCapabilities are the capabilities that will be dropped from the container. These
+ // are required to be dropped and cannot be added.
+ RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"`
+ // AllowedCapabilities is a list of capabilities that can be requested to add to the container.
+ // Capabilities in this field may be added at the pod author's discretion.
+ // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
+ AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"`
+ // volumes is a white list of allowed volume plugins. Empty indicates that all plugins
+ // may be used.
+ Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"`
+ // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+ HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"`
+ // hostPorts determines which host port ranges are allowed to be exposed.
+ HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"`
+ // hostPID determines if the policy allows the use of HostPID in the pod spec.
+ HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"`
+ // hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+ HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"`
+ // seLinux is the strategy that will dictate the allowable labels that may be set.
+ SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
+ // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+ RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
+ // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+ SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
+ // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+ FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"`
+ // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
+ // system. If the container specifically requests to run with a non-read only root file system
+ // the PSP should deny the pod.
+ // If set to false the container may run with a read only root file system if it wishes but it
+ // will not be forced to.
+ ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"`
+}
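The capability fields above state that a capability may not be listed in both DefaultAddCapabilities (or AllowedCapabilities) and RequiredDropCapabilities. A small, self-contained overlap check, with plain strings standing in for v1.Capability (the overlap helper is illustrative only):

package main

import "fmt"

// overlap returns the capabilities that appear in both lists, which the
// field comments above say must be empty for a valid policy.
func overlap(a, b []string) []string {
	seen := map[string]bool{}
	for _, c := range a {
		seen[c] = true
	}
	var both []string
	for _, c := range b {
		if seen[c] {
			both = append(both, c)
		}
	}
	return both
}

func main() {
	fmt.Println(overlap([]string{"NET_BIND_SERVICE"}, []string{"NET_BIND_SERVICE", "SYS_ADMIN"}))
}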
+
+// FS Type gives strong typing to different file systems that are used by volumes.
+type FSType string
+
+var (
+ AzureFile FSType = "azureFile"
+ Flocker FSType = "flocker"
+ FlexVolume FSType = "flexVolume"
+ HostPath FSType = "hostPath"
+ EmptyDir FSType = "emptyDir"
+ GCEPersistentDisk FSType = "gcePersistentDisk"
+ AWSElasticBlockStore FSType = "awsElasticBlockStore"
+ GitRepo FSType = "gitRepo"
+ Secret FSType = "secret"
+ NFS FSType = "nfs"
+ ISCSI FSType = "iscsi"
+ Glusterfs FSType = "glusterfs"
+ PersistentVolumeClaim FSType = "persistentVolumeClaim"
+ RBD FSType = "rbd"
+ Cinder FSType = "cinder"
+ CephFS FSType = "cephFS"
+ DownwardAPI FSType = "downwardAPI"
+ FC FSType = "fc"
+ ConfigMap FSType = "configMap"
+ All FSType = "*"
+)
+
+// Host Port Range defines a range of host ports that will be enabled by a policy
+// for pods to use. It requires both the start and end to be defined.
+type HostPortRange struct {
+ // min is the start of the range, inclusive.
+ Min int32 `json:"min" protobuf:"varint,1,opt,name=min"`
+ // max is the end of the range, inclusive.
+ Max int32 `json:"max" protobuf:"varint,2,opt,name=max"`
+}
+
+// SELinux Strategy Options defines the strategy type and any options used to create the strategy.
+type SELinuxStrategyOptions struct {
+ // type is the strategy that will dictate the allowable labels that may be set.
+ Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"`
+ // seLinuxOptions required to run as; required for MustRunAs
+ // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context
+ SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
+}
+
+// SELinuxStrategy denotes strategy types for generating SELinux options for a
+// Security Context.
+type SELinuxStrategy string
+
+const (
+ // container must have SELinux labels of X applied.
+ SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
+ // container may make requests for any SELinux context labels.
+ SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
+)
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+type RunAsUserStrategyOptions struct {
+ // Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+ Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"`
+ // Ranges are the allowed ranges of uids that may be used.
+ Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// ID Range provides a min/max of an allowed range of IDs.
+type IDRange struct {
+ // Min is the start of the range, inclusive.
+ Min int64 `json:"min" protobuf:"varint,1,opt,name=min"`
+ // Max is the end of the range, inclusive.
+ Max int64 `json:"max" protobuf:"varint,2,opt,name=max"`
+}
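Since Min and Max are both inclusive, a MustRunAs-style check of a requested uid against the allowed ranges reduces to simple containment. The sketch below uses a local idRange stand-in and a hypothetical uidAllowed helper, not the real policy provider:

package main

import "fmt"

// idRange mirrors IDRange: both bounds are inclusive.
type idRange struct{ Min, Max int64 }

// uidAllowed reports whether uid falls inside any allowed range.
func uidAllowed(uid int64, ranges []idRange) bool {
	for _, r := range ranges {
		if uid >= r.Min && uid <= r.Max {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(uidAllowed(1000, []idRange{{Min: 1000, Max: 2000}})) // true
	fmt.Println(uidAllowed(0, []idRange{{Min: 1000, Max: 2000}}))    // false
}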
+
+// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
+// Security Context.
+type RunAsUserStrategy string
+
+const (
+ // container must run as a particular uid.
+ RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
+ // container must run as a non-root uid
+ RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
+ // container may make requests for any uid.
+ RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
+)
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+type FSGroupStrategyOptions struct {
+ // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+ Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"`
+ // Ranges are the allowed ranges of fs groups. If you would like to force a single
+ // fs group then supply a single range with the same start and end.
+ Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
+// SecurityContext
+type FSGroupStrategyType string
+
+const (
+ // container must have FSGroup of X applied.
+ FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
+ // container may make requests for any FSGroup labels.
+ FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
+)
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+type SupplementalGroupsStrategyOptions struct {
+ // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
+ Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"`
+ // Ranges are the allowed ranges of supplemental groups. If you would like to force a single
+ // supplemental group then supply a single range with the same start and end.
+ Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
+// groups for a SecurityContext.
+type SupplementalGroupsStrategyType string
+
+const (
+ // container must run as a particular gid.
+ SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
+ // container may make requests for any gid.
+ SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
+)
+
+// Pod Security Policy List is a list of PodSecurityPolicy objects.
+type PodSecurityPolicyList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of schema objects.
+ Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+type NetworkPolicy struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior for this NetworkPolicy.
+ Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+type NetworkPolicySpec struct {
+ // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules
+ // is applied to any pods selected by this field. Multiple network policies can select the
+ // same set of pods. In this case, the ingress rules for each are combined additively.
+ // This field is NOT optional and follows standard label selector semantics.
+ // An empty podSelector matches all pods in this namespace.
+ PodSelector LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
+
+ // List of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it,
+ // OR if the traffic source is the pod's local node,
+ // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+ // objects whose podSelector matches the pod.
+ // If this field is empty then this NetworkPolicy does not affect ingress isolation.
+ // If this field is present and contains at least one rule, this policy allows any traffic
+ // which matches at least one of the ingress rules in this list.
+ Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"`
+}
+
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
+type NetworkPolicyIngressRule struct {
+ // List of ports which should be made accessible on the pods selected for this rule.
+ // Each item in this list is combined using a logical OR.
+ // If this field is not provided, this rule matches all ports (traffic not restricted by port).
+ // If this field is empty, this rule matches no ports (no traffic matches).
+ // If this field is present and contains at least one item, then this rule allows traffic
+ // only if the traffic matches at least one port in the list.
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+ Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+ // List of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation.
+ // If this field is not provided, this rule matches all sources (traffic not restricted by source).
+ // If this field is empty, this rule matches no sources (no traffic matches).
+ // If this field is present and contains at least one item, this rule allows traffic only if the
+ // traffic matches at least one item in the from list.
+ // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+ From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
+}
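The rule above admits traffic only when some entry in Ports matches AND some entry in From matches, with an absent (nil) list matching everything and an empty list matching nothing. A self-contained sketch of that AND-of-ORs logic over pre-evaluated match results (ruleMatches is hypothetical, not the real policy evaluator):

package main

import "fmt"

// ruleMatches combines pre-evaluated port and peer match results:
// nil list -> unrestricted, empty list -> matches nothing, otherwise OR.
func ruleMatches(ports, peers []bool) bool {
	anyTrue := func(xs []bool) bool {
		for _, x := range xs {
			if x {
				return true
			}
		}
		return false
	}
	portsOK := ports == nil || anyTrue(ports)
	peersOK := peers == nil || anyTrue(peers)
	return portsOK && peersOK
}

func main() {
	fmt.Println(ruleMatches(nil, []bool{false, true})) // true: no port restriction, one peer matches
	fmt.Println(ruleMatches([]bool{}, []bool{true}))   // false: empty port list matches no traffic
}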
+
+type NetworkPolicyPort struct {
+ // Optional. The protocol (TCP or UDP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/kubernetes/pkg/api/v1.Protocol"`
+
+ // If specified, the port on the given protocol. This can
+ // either be a numerical or named port on a pod. If this field is not provided,
+ // this matches all port names and numbers.
+ // If present, only traffic on the specified protocol AND port
+ // will be matched.
+ Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
+}
+
+type NetworkPolicyPeer struct {
+ // Exactly one of the following must be specified.
+
+ // This is a label selector which selects Pods in this namespace.
+ // This field follows standard label selector semantics.
+ // If not provided, this selector selects no pods.
+ // If present but empty, this selector selects all pods in this namespace.
+ PodSelector *LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
+
+ // Selects Namespaces using cluster-scoped labels. This
+ // matches all pods in all namespaces selected by this label selector.
+ // This field follows standard label selector semantics.
+ // If omitted, this selector selects no namespaces.
+ // If present but empty, this selector selects all namespaces.
+ NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
+}
+
+// Network Policy List is a list of NetworkPolicy objects.
+type NetworkPolicyList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of schema objects.
+ Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..182ff78
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,740 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multi-line comments or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_APIVersion = map[string]string{
+ "": "An APIVersion represents a single concrete version of an object model.",
+ "name": "Name of this version (e.g. 'v1').",
+}
+
+func (APIVersion) SwaggerDoc() map[string]string {
+ return map_APIVersion
+}
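A consumer of these generated maps (for example, a Swagger documentation builder) looks descriptions up by JSON field name, with the empty key holding the type-level summary. The standalone sketch below illustrates that lookup against a local copy of the APIVersion map; it is not the go-restful code itself:

package main

import "fmt"

func main() {
	// Local copy of the generated map: "" is the type description,
	// other keys are JSON field names.
	doc := map[string]string{
		"":     "An APIVersion represents a single concrete version of an object model.",
		"name": "Name of this version (e.g. 'v1').",
	}
	fmt.Println(doc[""])     // type-level summary
	fmt.Println(doc["name"]) // per-field description
}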
+
+var map_CPUTargetUtilization = map[string]string{
+ "targetPercentage": "fraction of the requested CPU that should be utilized/used, e.g. 70 means that 70% of the requested CPU should be in use.",
+}
+
+func (CPUTargetUtilization) SwaggerDoc() map[string]string {
+ return map_CPUTargetUtilization
+}
+
+var map_CustomMetricCurrentStatus = map[string]string{
+ "name": "Custom Metric name.",
+ "value": "Custom Metric value (average).",
+}
+
+func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string {
+ return map_CustomMetricCurrentStatus
+}
+
+var map_CustomMetricTarget = map[string]string{
+ "": "Alpha-level support for Custom Metrics in HPA (as annotations).",
+ "name": "Custom Metric name.",
+ "value": "Custom Metric value (average).",
+}
+
+func (CustomMetricTarget) SwaggerDoc() map[string]string {
+ return map_CustomMetricTarget
+}
+
+var map_DaemonSet = map[string]string{
+ "": "DaemonSet represents the configuration of a daemon set.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (DaemonSet) SwaggerDoc() map[string]string {
+ return map_DaemonSet
+}
+
+var map_DaemonSetList = map[string]string{
+ "": "DaemonSetList is a collection of daemon sets.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is a list of daemon sets.",
+}
+
+func (DaemonSetList) SwaggerDoc() map[string]string {
+ return map_DaemonSetList
+}
+
+var map_DaemonSetSpec = map[string]string{
+ "": "DaemonSetSpec is the specification of a daemon set.",
+ "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template",
+}
+
+func (DaemonSetSpec) SwaggerDoc() map[string]string {
+ return map_DaemonSetSpec
+}
+
+var map_DaemonSetStatus = map[string]string{
+ "": "DaemonSetStatus represents the current status of a daemon set.",
+ "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
+ "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
+ "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
+}
+
+func (DaemonSetStatus) SwaggerDoc() map[string]string {
+ return map_DaemonSetStatus
+}
+
+var map_Deployment = map[string]string{
+ "": "Deployment enables declarative updates for Pods and ReplicaSets.",
+ "metadata": "Standard object metadata.",
+ "spec": "Specification of the desired behavior of the Deployment.",
+ "status": "Most recently observed status of the Deployment.",
+}
+
+func (Deployment) SwaggerDoc() map[string]string {
+ return map_Deployment
+}
+
+var map_DeploymentList = map[string]string{
+ "": "DeploymentList is a list of Deployments.",
+ "metadata": "Standard list metadata.",
+ "items": "Items is the list of Deployments.",
+}
+
+func (DeploymentList) SwaggerDoc() map[string]string {
+ return map_DeploymentList
+}
+
+var map_DeploymentRollback = map[string]string{
+ "": "DeploymentRollback stores the information required to rollback a deployment.",
+ "name": "Required: This must match the Name of a deployment.",
+ "updatedAnnotations": "The annotations to be updated to a deployment",
+ "rollbackTo": "The config of this deployment rollback.",
+}
+
+func (DeploymentRollback) SwaggerDoc() map[string]string {
+ return map_DeploymentRollback
+}
+
+var map_DeploymentSpec = map[string]string{
+ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+ "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+ "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
+ "template": "Template describes the pods that will be created.",
+ "strategy": "The deployment strategy to use to replace existing pods with new ones.",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.",
+ "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.",
+ "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.",
+}
+
+func (DeploymentSpec) SwaggerDoc() map[string]string {
+ return map_DeploymentSpec
+}
+
+var map_DeploymentStatus = map[string]string{
+ "": "DeploymentStatus is the most recently observed status of the Deployment.",
+ "observedGeneration": "The generation observed by the deployment controller.",
+ "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.",
+}
+
+func (DeploymentStatus) SwaggerDoc() map[string]string {
+ return map_DeploymentStatus
+}
+
+var map_DeploymentStrategy = map[string]string{
+ "": "DeploymentStrategy describes how to replace existing pods with new ones.",
+ "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+ "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+ return map_DeploymentStrategy
+}
+
+var map_ExportOptions = map[string]string{
+ "": "ExportOptions is the query options to the standard REST get call.",
+ "export": "Should this value be exported. Export strips fields that a user can not specify.",
+ "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'",
+}
+
+func (ExportOptions) SwaggerDoc() map[string]string {
+ return map_ExportOptions
+}
+
+var map_FSGroupStrategyOptions = map[string]string{
+ "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.",
+ "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.",
+ "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.",
+}
+
+func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
+ return map_FSGroupStrategyOptions
+}
+
+var map_HTTPIngressPath = map[string]string{
+ "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.",
+ "path": "Path is a extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.",
+ "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
+}
+
+func (HTTPIngressPath) SwaggerDoc() map[string]string {
+ return map_HTTPIngressPath
+}
+
+var map_HTTPIngressRuleValue = map[string]string{
+ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://<host>/<path>?<searchpart> -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.",
+ "paths": "A collection of paths that map requests to backends.",
+}
+
+func (HTTPIngressRuleValue) SwaggerDoc() map[string]string {
+ return map_HTTPIngressRuleValue
+}
+
+var map_HorizontalPodAutoscaler = map[string]string{
+ "": "configuration of a horizontal pod autoscaler.",
+ "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
+ "status": "current information about the autoscaler.",
+}
+
+func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscaler
+}
+
+var map_HorizontalPodAutoscalerList = map[string]string{
+ "": "list of horizontal pod autoscaler objects.",
+ "metadata": "Standard list metadata.",
+ "items": "list of horizontal pod autoscaler objects.",
+}
+
+func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerList
+}
+
+var map_HorizontalPodAutoscalerSpec = map[string]string{
+ "": "specification of a horizontal pod autoscaler.",
+ "scaleRef": "reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modifying its spec.",
+ "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.",
+ "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.",
+ "cpuUtilization": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified it defaults to the target CPU utilization at 80% of the requested resources.",
+}
+
+func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerSpec
+}
+
+var map_HorizontalPodAutoscalerStatus = map[string]string{
+ "": "current status of a horizontal pod autoscaler",
+ "observedGeneration": "most recent generation observed by this autoscaler.",
+ "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.",
+ "currentReplicas": "current number of replicas of pods managed by this autoscaler.",
+ "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.",
+ "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.",
+}
+
+func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
+ return map_HorizontalPodAutoscalerStatus
+}
+
+var map_HostPortRange = map[string]string{
+ "": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.",
+ "min": "min is the start of the range, inclusive.",
+ "max": "max is the end of the range, inclusive.",
+}
+
+func (HostPortRange) SwaggerDoc() map[string]string {
+ return map_HostPortRange
+}
+
+var map_IDRange = map[string]string{
+ "": "ID Range provides a min/max of an allowed range of IDs.",
+ "min": "Min is the start of the range, inclusive.",
+ "max": "Max is the end of the range, inclusive.",
+}
+
+func (IDRange) SwaggerDoc() map[string]string {
+ return map_IDRange
+}
+
+var map_Ingress = map[string]string{
+ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Ingress) SwaggerDoc() map[string]string {
+ return map_Ingress
+}
+
+var map_IngressBackend = map[string]string{
+ "": "IngressBackend describes all endpoints for a given service and port.",
+ "serviceName": "Specifies the name of the referenced service.",
+ "servicePort": "Specifies the port of the referenced service.",
+}
+
+func (IngressBackend) SwaggerDoc() map[string]string {
+ return map_IngressBackend
+}
+
+var map_IngressList = map[string]string{
+ "": "IngressList is a collection of Ingress.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of Ingress.",
+}
+
+func (IngressList) SwaggerDoc() map[string]string {
+ return map_IngressList
+}
+
+var map_IngressRule = map[string]string{
+ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.",
+ "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.",
+}
+
+func (IngressRule) SwaggerDoc() map[string]string {
+ return map_IngressRule
+}
+
+var map_IngressRuleValue = map[string]string{
+ "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.",
+}
+
+func (IngressRuleValue) SwaggerDoc() map[string]string {
+ return map_IngressRuleValue
+}
+
+var map_IngressSpec = map[string]string{
+ "": "IngressSpec describes the Ingress the user wishes to exist.",
+ "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
+ "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
+ "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
+}
+
+func (IngressSpec) SwaggerDoc() map[string]string {
+ return map_IngressSpec
+}
+
+var map_IngressStatus = map[string]string{
+ "": "IngressStatus describe the current state of the Ingress.",
+ "loadBalancer": "LoadBalancer contains the current status of the load-balancer.",
+}
+
+func (IngressStatus) SwaggerDoc() map[string]string {
+ return map_IngressStatus
+}
+
+var map_IngressTLS = map[string]string{
+ "": "IngressTLS describes the transport layer security associated with an Ingress.",
+ "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.",
+ "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.",
+}
+
+func (IngressTLS) SwaggerDoc() map[string]string {
+ return map_IngressTLS
+}
+
+var map_Job = map[string]string{
+ "": "Job represents the configuration of a single job.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Job) SwaggerDoc() map[string]string {
+ return map_Job
+}
+
+var map_JobCondition = map[string]string{
+ "": "JobCondition describes current state of a job.",
+ "type": "Type of job condition, Complete or Failed.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastProbeTime": "Last time the condition was checked.",
+ "lastTransitionTime": "Last time the condition transit from one status to another.",
+ "reason": "(brief) reason for the condition's last transition.",
+ "message": "Human readable message indicating details about last transition.",
+}
+
+func (JobCondition) SwaggerDoc() map[string]string {
+ return map_JobCondition
+}
+
+var map_JobList = map[string]string{
+ "": "JobList is a collection of jobs.",
+ "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of Job.",
+}
+
+func (JobList) SwaggerDoc() map[string]string {
+ return map_JobList
+}
+
+var map_JobSpec = map[string]string{
+ "": "JobSpec describes how the job execution will look like.",
+ "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
+ "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md",
+ "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+}
+
+func (JobSpec) SwaggerDoc() map[string]string {
+ return map_JobSpec
+}
+
+var map_JobStatus = map[string]string{
+ "": "JobStatus represents the current state of a Job.",
+ "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+ "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "active": "Active is the number of actively running pods.",
+ "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.",
+ "failed": "Failed is the number of pods which reached Phase Failed.",
+}
+
+func (JobStatus) SwaggerDoc() map[string]string {
+ return map_JobStatus
+}
+
+var map_LabelSelector = map[string]string{
+ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+ "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+ "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+ return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+ "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "key": "key is the label key that the selector applies to.",
+ "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.",
+ "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_LabelSelectorRequirement
+}
+
+var map_ListOptions = map[string]string{
+ "": "ListOptions is the query options to a standard REST list call.",
+ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.",
+ "timeoutSeconds": "Timeout for the list/watch call.",
+}
+
+func (ListOptions) SwaggerDoc() map[string]string {
+ return map_ListOptions
+}
+
+var map_NetworkPolicy = map[string]string{
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior for this NetworkPolicy.",
+}
+
+func (NetworkPolicy) SwaggerDoc() map[string]string {
+ return map_NetworkPolicy
+}
+
+var map_NetworkPolicyIngressRule = map[string]string{
+ "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.",
+ "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+ "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
+}
+
+func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string {
+ return map_NetworkPolicyIngressRule
+}
+
+var map_NetworkPolicyList = map[string]string{
+ "": "Network Policy List is a list of NetworkPolicy objects.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is a list of schema objects.",
+}
+
+func (NetworkPolicyList) SwaggerDoc() map[string]string {
+ return map_NetworkPolicyList
+}
+
+var map_NetworkPolicyPeer = map[string]string{
+ "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.",
+ "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
+}
+
+func (NetworkPolicyPeer) SwaggerDoc() map[string]string {
+ return map_NetworkPolicyPeer
+}
+
+var map_NetworkPolicyPort = map[string]string{
+ "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.",
+ "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.",
+}
+
+func (NetworkPolicyPort) SwaggerDoc() map[string]string {
+ return map_NetworkPolicyPort
+}
+
+var map_NetworkPolicySpec = map[string]string{
+ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.",
+ "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not affect ingress isolation. If this field is present and contains at least one rule, this policy allows any traffic which matches at least one of the ingress rules in this list.",
+}
+
+func (NetworkPolicySpec) SwaggerDoc() map[string]string {
+ return map_NetworkPolicySpec
+}
+
+var map_PodSecurityPolicy = map[string]string{
+ "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.",
+ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "spec defines the policy enforced.",
+}
+
+func (PodSecurityPolicy) SwaggerDoc() map[string]string {
+ return map_PodSecurityPolicy
+}
+
+var map_PodSecurityPolicyList = map[string]string{
+ "": "Pod Security Policy List is a list of PodSecurityPolicy objects.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is a list of schema objects.",
+}
+
+func (PodSecurityPolicyList) SwaggerDoc() map[string]string {
+ return map_PodSecurityPolicyList
+}
+
+var map_PodSecurityPolicySpec = map[string]string{
+ "": "Pod Security Policy Spec defines the policy enforced.",
+ "privileged": "privileged determines if a pod can request to be run as privileged.",
+ "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.",
+ "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.",
+ "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.",
+ "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.",
+ "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.",
+ "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.",
+ "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.",
+ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
+ "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.",
+ "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
+ "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
+ "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
+ "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
+}
+
+func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
+ return map_PodSecurityPolicySpec
+}
+
+var map_ReplicaSet = map[string]string{
+ "": "ReplicaSet represents the configuration of a ReplicaSet.",
+ "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+ return map_ReplicaSet
+}
+
+var map_ReplicaSetList = map[string]string{
+ "": "ReplicaSetList is a collection of ReplicaSets.",
+ "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "items": "List of ReplicaSets. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+ return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+ "": "ReplicaSetSpec is the specification of a ReplicaSet.",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
+ "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+ return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+ "": "ReplicaSetStatus represents the current status of a ReplicaSet.",
+ "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
+ "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+ "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+}
+
+func (ReplicaSetStatus) SwaggerDoc() map[string]string {
+ return map_ReplicaSetStatus
+}
+
+var map_ReplicationControllerDummy = map[string]string{
+ "": "Dummy definition",
+}
+
+func (ReplicationControllerDummy) SwaggerDoc() map[string]string {
+ return map_ReplicationControllerDummy
+}
+
+var map_RollbackConfig = map[string]string{
+ "revision": "The revision to rollback to. If set to 0, rollbck to the last revision.",
+}
+
+func (RollbackConfig) SwaggerDoc() map[string]string {
+ return map_RollbackConfig
+}
+
+var map_RollingUpdateDeployment = map[string]string{
+ "": "Spec to control the desired behavior of rolling update.",
+ "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+ "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDeployment
+}
+
+var map_RunAsUserStrategyOptions = map[string]string{
+ "": "Run A sUser Strategy Options defines the strategy type and any options used to create the strategy.",
+ "rule": "Rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
+ "ranges": "Ranges are the allowed ranges of uids that may be used.",
+}
+
+func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
+ return map_RunAsUserStrategyOptions
+}
+
+var map_SELinuxStrategyOptions = map[string]string{
+ "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.",
+ "rule": "type is the strategy that will dictate the allowable labels that may be set.",
+ "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context",
+}
+
+func (SELinuxStrategyOptions) SwaggerDoc() map[string]string {
+ return map_SELinuxStrategyOptions
+}
+
+var map_Scale = map[string]string{
+ "": "represents a scaling request for a resource.",
+ "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.",
+ "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
+ "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+ return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+ "": "describes the attributes of a scale subresource",
+ "replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+ return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+ "": "represents the current status of a scale subresource.",
+ "replicas": "actual number of observed instances of the scaled object.",
+ "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+ "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+ return map_ScaleStatus
+}
+
+var map_SubresourceReference = map[string]string{
+ "": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.",
+ "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "name": "Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
+ "apiVersion": "API version of the referent",
+ "subresource": "Subresource name of the referent",
+}
+
+func (SubresourceReference) SwaggerDoc() map[string]string {
+ return map_SubresourceReference
+}
+
+var map_SupplementalGroupsStrategyOptions = map[string]string{
+ "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.",
+ "rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
+ "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.",
+}
+
+func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string {
+ return map_SupplementalGroupsStrategyOptions
+}
+
+var map_ThirdPartyResource = map[string]string{
+ "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.",
+ "metadata": "Standard object metadata",
+ "description": "Description is the description of this object.",
+ "versions": "Versions are versions for this third party object",
+}
+
+func (ThirdPartyResource) SwaggerDoc() map[string]string {
+ return map_ThirdPartyResource
+}
+
+var map_ThirdPartyResourceData = map[string]string{
+ "": "An internal object, used for versioned storage in etcd. Not exposed to the end user.",
+ "metadata": "Standard object metadata.",
+ "data": "Data is the raw JSON data for this data.",
+}
+
+func (ThirdPartyResourceData) SwaggerDoc() map[string]string {
+ return map_ThirdPartyResourceData
+}
+
+var map_ThirdPartyResourceDataList = map[string]string{
+ "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.",
+ "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "items": "Items is the list of ThirdpartyResourceData.",
+}
+
+func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string {
+ return map_ThirdPartyResourceDataList
+}
+
+var map_ThirdPartyResourceList = map[string]string{
+ "": "ThirdPartyResourceList is a list of ThirdPartyResources.",
+ "metadata": "Standard list metadata.",
+ "items": "Items is the list of ThirdPartyResources.",
+}
+
+func (ThirdPartyResourceList) SwaggerDoc() map[string]string {
+ return map_ThirdPartyResourceList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
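[Editor's note, not part of the vendored diff] The SwaggerDoc() methods above just return per-field description maps, with the "" key holding the type-level description. Below is a minimal consumption sketch; it assumes this generated file belongs to the vendored extensions/v1beta1 group (as the type set suggests), so the import path is an assumption.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1" // assumed location of the generated SwaggerDoc maps
)

func main() {
	// SwaggerDoc is defined on the value type, so a zero value is enough.
	docs := v1beta1.IngressSpec{}.SwaggerDoc()
	fmt.Println("IngressSpec:", docs[""]) // "" carries the type description
	for field, doc := range docs {
		if field != "" {
			fmt.Printf("  %s: %s\n", field, doc)
		}
	}
}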
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go
new file mode 100644
index 0000000..098a0ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go
@@ -0,0 +1,90 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package policy
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_policy_PodDisruptionBudget,
+ DeepCopy_policy_PodDisruptionBudgetList,
+ DeepCopy_policy_PodDisruptionBudgetSpec,
+ DeepCopy_policy_PodDisruptionBudgetStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_policy_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_policy_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_policy_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PodDisruptionBudget, len(in))
+ for i := range in {
+ if err := DeepCopy_policy_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_policy_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error {
+ out.MinAvailable = in.MinAvailable
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ return nil
+}
+
+func DeepCopy_policy_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error {
+ out.PodDisruptionAllowed = in.PodDisruptionAllowed
+ out.CurrentHealthy = in.CurrentHealthy
+ out.DesiredHealthy = in.DesiredHealthy
+ out.ExpectedPods = in.ExpectedPods
+ return nil
+}
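[Editor's note, not part of the vendored diff] The generated deep-copy functions are registered with api.Scheme in init() and are normally invoked through the scheme, but they can also be called directly. A minimal sketch, assuming conversion.NewCloner is available in this vendored snapshot:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/policy"
	"k8s.io/kubernetes/pkg/conversion"
)

func main() {
	in := policy.PodDisruptionBudget{}
	in.ObjectMeta.Name = "example" // illustrative value to show it is copied
	var out policy.PodDisruptionBudget

	// Call the generated function directly; the Cloner is used for any
	// fields that fall back to reflection-based copying.
	if err := policy.DeepCopy_policy_PodDisruptionBudget(in, &out, conversion.NewCloner()); err != nil {
		fmt.Println("deep copy failed:", err)
		return
	}
	fmt.Println("copied name:", out.ObjectMeta.Name)
}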
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go
new file mode 100644
index 0000000..876858c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+package policy
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go
new file mode 100644
index 0000000..90489a7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the experimental API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/policy"
+ "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/policy"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", policy.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ // the list of kinds that are scoped at the root of the api hierarchy
+ // if a kind is not enumerated here, it is assumed to have a namespace scope
+ rootScoped := sets.NewString()
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version
+// string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1alpha1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(policy.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ policy.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1alpha1.SchemeGroupVersion:
+ v1alpha1.AddToScheme(api.Scheme)
+ }
+ }
+}
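[Editor's note, not part of the vendored diff] As the package comment states, install exists for its side effects: init() registers and enables the group's external versions and adds the internal and v1alpha1 types to api.Scheme. A minimal sketch of the usual blank-import pattern (the check at the end is purely illustrative):

package main

import (
	"fmt"

	_ "k8s.io/kubernetes/pkg/apis/policy/install" // side effect: registers the policy group

	"k8s.io/kubernetes/pkg/apimachinery/registered"
	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
)

func main() {
	// After the blank import, the external version should be enabled,
	// assuming it is allowed by the registered package's defaults.
	fmt.Println("policy/v1alpha1 enabled:", registered.IsEnabledVersion(v1alpha1.SchemeGroupVersion))
}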
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go
new file mode 100644
index 0000000..e61c82b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package policy
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "policy"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ // TODO this gets cleaned up when the types are fixed
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &PodDisruptionBudget{},
+ &PodDisruptionBudgetList{},
+ )
+}
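[Editor's note, not part of the vendored diff] The Kind and Resource helpers above qualify bare names with the "policy" group. A small usage sketch; the resource name string is illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/policy"
)

func main() {
	gk := policy.Kind("PodDisruptionBudget")      // group-qualified kind in the "policy" group
	gr := policy.Resource("poddisruptionbudgets") // group-qualified resource in the "policy" group
	fmt.Println(gk, gr)
}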
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go
new file mode 100644
index 0000000..0665268
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go
@@ -0,0 +1,1440 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package policy
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg3_api "k8s.io/kubernetes/pkg/api"
+ pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg4_types "k8s.io/kubernetes/pkg/types"
+ pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg3_api.ObjectMeta
+ var v1 pkg2_unversioned.LabelSelector
+ var v2 pkg4_types.UID
+ var v3 pkg1_intstr.IntOrString
+ var v4 time.Time
+ _, _, _, _, _ = v0, v1, v2, v3, v4
+ }
+}
+
+func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = x.Selector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.MinAvailable
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minAvailable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.MinAvailable
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "minAvailable":
+ if r.TryDecodeAsNil() {
+ x.MinAvailable = pkg1_intstr.IntOrString{}
+ } else {
+ yyv4 := &x.MinAvailable
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinAvailable = pkg1_intstr.IntOrString{}
+ } else {
+ yyv9 := &x.MinAvailable
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.PodDisruptionAllowed))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("disruptionAllowed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.PodDisruptionAllowed))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentHealthy))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentHealthy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentHealthy))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredHealthy))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredHealthy))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExpectedPods))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("expectedPods"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExpectedPods))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "disruptionAllowed":
+ if r.TryDecodeAsNil() {
+ x.PodDisruptionAllowed = false
+ } else {
+ x.PodDisruptionAllowed = bool(r.DecodeBool())
+ }
+ case "currentHealthy":
+ if r.TryDecodeAsNil() {
+ x.CurrentHealthy = 0
+ } else {
+ x.CurrentHealthy = int32(r.DecodeInt(32))
+ }
+ case "desiredHealthy":
+ if r.TryDecodeAsNil() {
+ x.DesiredHealthy = 0
+ } else {
+ x.DesiredHealthy = int32(r.DecodeInt(32))
+ }
+ case "expectedPods":
+ if r.TryDecodeAsNil() {
+ x.ExpectedPods = 0
+ } else {
+ x.ExpectedPods = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodDisruptionAllowed = false
+ } else {
+ x.PodDisruptionAllowed = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentHealthy = 0
+ } else {
+ x.CurrentHealthy = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredHealthy = 0
+ } else {
+ x.DesiredHealthy = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExpectedPods = 0
+ } else {
+ x.ExpectedPods = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_api.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodDisruptionBudgetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PodDisruptionBudgetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_api.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodDisruptionBudgetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PodDisruptionBudgetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodDisruptionBudget{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodDisruptionBudget, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodDisruptionBudget, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodDisruptionBudget{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodDisruptionBudget{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodDisruptionBudget{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodDisruptionBudget{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodDisruptionBudget{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
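
The codec helpers above implement the ugorji Selfer interface, so these types round-trip through any codec Handle without reflection and accept either map-keyed or array-positional struct encodings. A minimal round-trip sketch, assuming the vendored github.com/ugorji/go/codec API; the main package and field values are illustrative, not taken from kube2msb:

// Usage sketch (not part of the vendored file): the generated
// CodecEncodeSelf/CodecDecodeSelf methods are picked up automatically by the
// ugorji codec because the types satisfy codec.Selfer.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	"k8s.io/kubernetes/pkg/apis/policy"
)

func main() {
	in := policy.PodDisruptionBudgetStatus{
		PodDisruptionAllowed: true,
		CurrentHealthy:       3,
		DesiredHealthy:       3,
		ExpectedPods:         4,
	}

	var jh codec.JsonHandle // JSON handle chosen for readability of the output
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &jh).Encode(&in); err != nil {
		panic(err)
	}

	var out policy.PodDisruptionBudgetStatus
	if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.CurrentHealthy, out.ExpectedPods)
}
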
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go
new file mode 100644
index 0000000..adc31e5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package policy
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
+type PodDisruptionBudgetSpec struct {
+ // The minimum number of pods that must be available simultaneously. This
+ // can be either an integer or a string specifying a percentage, e.g. "28%".
+ MinAvailable intstr.IntOrString `json:"minAvailable,omitempty"`
+
+ // Label query over pods whose evictions are managed by the disruption
+ // budget.
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+}
+
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
+type PodDisruptionBudgetStatus struct {
+ // Whether or not a disruption is currently allowed.
+ PodDisruptionAllowed bool `json:"disruptionAllowed"`
+
+ // current number of healthy pods
+ CurrentHealthy int32 `json:"currentHealthy"`
+
+ // minimum desired number of healthy pods
+ DesiredHealthy int32 `json:"desiredHealthy"`
+
+ // total number of pods counted by this disruption budget
+ ExpectedPods int32 `json:"expectedPods"`
+}
+
+// +genclient=true
+// +noMethods=true
+
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
+type PodDisruptionBudget struct {
+ unversioned.TypeMeta `json:",inline"`
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Specification of the desired behavior of the PodDisruptionBudget.
+ Spec PodDisruptionBudgetSpec `json:"spec,omitempty"`
+ // Most recently observed status of the PodDisruptionBudget.
+ Status PodDisruptionBudgetStatus `json:"status,omitempty"`
+}
+
+// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
+type PodDisruptionBudgetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty"`
+ Items []PodDisruptionBudget `json:"items"`
+}
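
With the internal types in place, a PodDisruptionBudget can be built directly. A construction sketch with illustrative names and values (the "example-pdb" object is hypothetical, not from kube2msb):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/policy"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	pdb := policy.PodDisruptionBudget{
		ObjectMeta: api.ObjectMeta{Name: "example-pdb", Namespace: "default"},
		Spec: policy.PodDisruptionBudgetSpec{
			// Keep at least 50% of the selected pods available at all times.
			MinAvailable: intstr.FromString("50%"),
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"app": "example"},
			},
		},
	}
	fmt.Println(pdb.Spec.MinAvailable.String())
}
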
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go
new file mode 100644
index 0000000..c524ca2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go
@@ -0,0 +1,183 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ policy "k8s.io/kubernetes/pkg/apis/policy"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget,
+ Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget,
+ Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList,
+ Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList,
+ Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec,
+ Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec,
+ Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus,
+ Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s)
+}
+
+func autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error {
+ return autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in, out, s)
+}
+
+func autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]policy.PodDisruptionBudget, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s)
+}
+
+func autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PodDisruptionBudget, len(*in))
+ for i := range *in {
+ if err := Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error {
+ return autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in, out, s)
+}
+
+func autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error {
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil {
+ return err
+ }
+ out.Selector = in.Selector
+ return nil
+}
+
+func Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s)
+}
+
+func autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error {
+ if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil {
+ return err
+ }
+ out.Selector = in.Selector
+ return nil
+}
+
+func Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error {
+ return autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in, out, s)
+}
+
+func autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
+ out.PodDisruptionAllowed = in.PodDisruptionAllowed
+ out.CurrentHealthy = in.CurrentHealthy
+ out.DesiredHealthy = in.DesiredHealthy
+ out.ExpectedPods = in.ExpectedPods
+ return nil
+}
+
+func Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s)
+}
+
+func autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error {
+ out.PodDisruptionAllowed = in.PodDisruptionAllowed
+ out.CurrentHealthy = in.CurrentHealthy
+ out.DesiredHealthy = in.DesiredHealthy
+ out.ExpectedPods = in.ExpectedPods
+ return nil
+}
+
+func Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error {
+ return autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in, out, s)
+}
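
The registered functions are normally reached through the scheme rather than called directly. A hedged sketch, assuming the two-argument Scheme.Convert signature of this Kubernetes vintage (later releases add a conversion context argument) and an illustrative object name:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/policy"
	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
)

func main() {
	// Versioned object as it would arrive from the API server.
	versioned := v1alpha1.PodDisruptionBudget{}
	versioned.Name = "example-pdb"

	// Convert to the internal representation via the registered conversions.
	var internal policy.PodDisruptionBudget
	if err := api.Scheme.Convert(&versioned, &internal); err != nil {
		panic(err)
	}
	fmt.Println(internal.Name)
}
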
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go
new file mode 100644
index 0000000..05f286d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go
@@ -0,0 +1,91 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1alpha1_PodDisruptionBudget,
+ DeepCopy_v1alpha1_PodDisruptionBudgetList,
+ DeepCopy_v1alpha1_PodDisruptionBudgetSpec,
+ DeepCopy_v1alpha1_PodDisruptionBudgetStatus,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1alpha1_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if err := DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil {
+ return err
+ }
+ out.Status = in.Status
+ return nil
+}
+
+func DeepCopy_v1alpha1_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]PodDisruptionBudget, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error {
+ out.MinAvailable = in.MinAvailable
+ if in.Selector != nil {
+ in, out := in.Selector, &out.Selector
+ *out = new(unversioned.LabelSelector)
+ if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil {
+ return err
+ }
+ } else {
+ out.Selector = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error {
+ out.PodDisruptionAllowed = in.PodDisruptionAllowed
+ out.CurrentHealthy = in.CurrentHealthy
+ out.DesiredHealthy = in.DesiredHealthy
+ out.ExpectedPods = in.ExpectedPods
+ return nil
+}
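
The deep-copy helpers can also be invoked directly with a fresh Cloner; callers normally go through api.Scheme instead. A minimal sketch with illustrative values:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/conversion"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	in := v1alpha1.PodDisruptionBudget{
		Spec: v1alpha1.PodDisruptionBudgetSpec{MinAvailable: intstr.FromInt(2)},
	}

	var out v1alpha1.PodDisruptionBudget
	if err := v1alpha1.DeepCopy_v1alpha1_PodDisruptionBudget(in, &out, conversion.NewCloner()); err != nil {
		panic(err)
	}

	// Mutating the copy does not touch the original.
	out.Spec.MinAvailable = intstr.FromInt(3)
	fmt.Println(in.Spec.MinAvailable.IntValue(), out.Spec.MinAvailable.IntValue())
}
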
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go
new file mode 100644
index 0000000..985d4bb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/policy
+
+// Package policy is for any kind of policy object. Suitable examples, even if
+// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy,
+// NetworkPolicy, etc.
+package v1alpha1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go
new file mode 100644
index 0000000..7a4c999
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go
@@ -0,0 +1,903 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1alpha1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto
+
+ It has these top-level messages:
+ PodDisruptionBudget
+ PodDisruptionBudgetList
+ PodDisruptionBudgetSpec
+ PodDisruptionBudgetStatus
+*/
+package v1alpha1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} }
+func (m *PodDisruptionBudget) String() string { return proto.CompactTextString(m) }
+func (*PodDisruptionBudget) ProtoMessage() {}
+
+func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} }
+func (m *PodDisruptionBudgetList) String() string { return proto.CompactTextString(m) }
+func (*PodDisruptionBudgetList) ProtoMessage() {}
+
+func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} }
+func (m *PodDisruptionBudgetSpec) String() string { return proto.CompactTextString(m) }
+func (*PodDisruptionBudgetSpec) ProtoMessage() {}
+
+func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} }
+func (m *PodDisruptionBudgetStatus) String() string { return proto.CompactTextString(m) }
+func (*PodDisruptionBudgetStatus) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudget")
+ proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetList")
+ proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetSpec")
+ proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetStatus")
+}
+func (m *PodDisruptionBudget) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size()))
+ n2, err := m.Spec.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Status.Size()))
+ n3, err := m.Status.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n4, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size()))
+ n5, err := m.MinAvailable.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ if m.Selector != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Selector.Size()))
+ n6, err := m.Selector.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+
+func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ if m.PodDisruptionAllowed {
+ data[i] = 1
+ } else {
+ data[i] = 0
+ }
+ i++
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy))
+ data[i] = 0x18
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy))
+ data[i] = 0x20
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods))
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *PodDisruptionBudget) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodDisruptionBudgetList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodDisruptionBudgetSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = m.MinAvailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PodDisruptionBudgetStatus) Size() (n int) {
+ var l int
+ _ = l
+ n += 2
+ n += 1 + sovGenerated(uint64(m.CurrentHealthy))
+ n += 1 + sovGenerated(uint64(m.DesiredHealthy))
+ n += 1 + sovGenerated(uint64(m.ExpectedPods))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PodDisruptionBudget) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PodDisruptionBudget{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionAllowed", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PodDisruptionAllowed = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType)
+ }
+ m.CurrentHealthy = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.CurrentHealthy |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType)
+ }
+ m.DesiredHealthy = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.DesiredHealthy |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType)
+ }
+ m.ExpectedPods = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.ExpectedPods |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
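
The generated Marshal/Unmarshal methods above give a straightforward protobuf round trip. A minimal sketch with illustrative field values:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
)

func main() {
	in := v1alpha1.PodDisruptionBudgetStatus{
		PodDisruptionAllowed: true,
		CurrentHealthy:       2,
		DesiredHealthy:       2,
		ExpectedPods:         3,
	}

	// Marshal to the wire format defined in generated.proto.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal back into a fresh value.
	var out v1alpha1.PodDisruptionBudgetStatus
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.ExpectedPods)
}
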
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto
new file mode 100644
index 0000000..d04a0af
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto
@@ -0,0 +1,77 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.policy.v1alpha1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
+message PodDisruptionBudget {
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the PodDisruptionBudget.
+ optional PodDisruptionBudgetSpec spec = 2;
+
+ // Most recently observed status of the PodDisruptionBudget.
+ optional PodDisruptionBudgetStatus status = 3;
+}
+
+// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
+message PodDisruptionBudgetList {
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ repeated PodDisruptionBudget items = 2;
+}
+
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
+message PodDisruptionBudgetSpec {
+ // The minimum number of pods that must be available simultaneously. This
+ // can be either an integer or a string specifying a percentage, e.g. "28%".
+ optional k8s.io.kubernetes.pkg.util.intstr.IntOrString minAvailable = 1;
+
+ // Label query over pods whose evictions are managed by the disruption
+ // budget.
+ optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2;
+}
+
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
+message PodDisruptionBudgetStatus {
+ // Whether or not a disruption is currently allowed.
+ optional bool disruptionAllowed = 1;
+
+ // current number of healthy pods
+ optional int32 currentHealthy = 2;
+
+ // minimum desired number of healthy pods
+ optional int32 desiredHealthy = 3;
+
+ // total number of pods counted by this disruption budget
+ optional int32 expectedPods = 4;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go
new file mode 100644
index 0000000..6a82e3f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "policy"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+ /*
+ addDefaultingFuncs(scheme)
+ addConversionFuncs(scheme)
+ */
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &PodDisruptionBudget{},
+ &PodDisruptionBudgetList{},
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ )
+ // Add the watch version that applies
+ versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
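
A short usage sketch for the registration above, not part of the vendored diff. It assumes the vendored import paths shown in this patch; AddToScheme is the same call the installer later makes against api.Scheme, applied here to a fresh scheme so the registered kinds are easy to inspect:

package main

import (
	"fmt"

	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Register the policy/v1alpha1 kinds (PodDisruptionBudget, its list type,
	// ListOptions, DeleteOptions and the watch event) into a new scheme.
	scheme := runtime.NewScheme()
	policyv1alpha1.AddToScheme(scheme)

	// KnownTypes reports what addKnownTypes just registered for this group version.
	for kind := range scheme.KnownTypes(policyv1alpha1.SchemeGroupVersion) {
		fmt.Println(kind)
	}
}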
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go
new file mode 100644
index 0000000..6592c9c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go
@@ -0,0 +1,1440 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1alpha1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg4_types "k8s.io/kubernetes/pkg/types"
+ pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_unversioned.LabelSelector
+ var v1 pkg3_v1.ObjectMeta
+ var v2 pkg4_types.UID
+ var v3 pkg1_intstr.IntOrString
+ var v4 time.Time
+ _, _, _, _, _ = v0, v1, v2, v3, v4
+ }
+}
+
+func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = x.Selector != nil
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.MinAvailable
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy4)
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("minAvailable"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.MinAvailable
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy6)
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("selector"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Selector == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Selector) {
+ } else {
+ z.EncFallback(x.Selector)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "minAvailable":
+ if r.TryDecodeAsNil() {
+ x.MinAvailable = pkg1_intstr.IntOrString{}
+ } else {
+ yyv4 := &x.MinAvailable
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else if !yym5 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv4)
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "selector":
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.MinAvailable = pkg1_intstr.IntOrString{}
+ } else {
+ yyv9 := &x.MinAvailable
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv9)
+ } else {
+ z.DecFallback(yyv9, false)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ if x.Selector != nil {
+ x.Selector = nil
+ }
+ } else {
+ if x.Selector == nil {
+ x.Selector = new(pkg2_unversioned.LabelSelector)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x.Selector) {
+ } else {
+ z.DecFallback(x.Selector, false)
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 4
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.PodDisruptionAllowed))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("disruptionAllowed"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.PodDisruptionAllowed))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentHealthy))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("currentHealthy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.CurrentHealthy))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredHealthy))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.DesiredHealthy))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExpectedPods))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("expectedPods"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ExpectedPods))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "disruptionAllowed":
+ if r.TryDecodeAsNil() {
+ x.PodDisruptionAllowed = false
+ } else {
+ x.PodDisruptionAllowed = bool(r.DecodeBool())
+ }
+ case "currentHealthy":
+ if r.TryDecodeAsNil() {
+ x.CurrentHealthy = 0
+ } else {
+ x.CurrentHealthy = int32(r.DecodeInt(32))
+ }
+ case "desiredHealthy":
+ if r.TryDecodeAsNil() {
+ x.DesiredHealthy = 0
+ } else {
+ x.DesiredHealthy = int32(r.DecodeInt(32))
+ }
+ case "expectedPods":
+ if r.TryDecodeAsNil() {
+ x.ExpectedPods = 0
+ } else {
+ x.ExpectedPods = int32(r.DecodeInt(32))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.PodDisruptionAllowed = false
+ } else {
+ x.PodDisruptionAllowed = bool(r.DecodeBool())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.CurrentHealthy = 0
+ } else {
+ x.CurrentHealthy = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.DesiredHealthy = 0
+ } else {
+ x.DesiredHealthy = int32(r.DecodeInt(32))
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ExpectedPods = 0
+ } else {
+ x.ExpectedPods = int32(r.DecodeInt(32))
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[1] = true
+ yyq2[2] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 0
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy9 := &x.Spec
+ yy9.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("spec"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy11 := &x.Spec
+ yy11.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yy14 := &x.Status
+ yy14.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("status"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy16 := &x.Status
+ yy16.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "spec":
+ if r.TryDecodeAsNil() {
+ x.Spec = PodDisruptionBudgetSpec{}
+ } else {
+ yyv5 := &x.Spec
+ yyv5.CodecDecodeSelf(d)
+ }
+ case "status":
+ if r.TryDecodeAsNil() {
+ x.Status = PodDisruptionBudgetStatus{}
+ } else {
+ yyv6 := &x.Status
+ yyv6.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Spec = PodDisruptionBudgetSpec{}
+ } else {
+ yyv11 := &x.Spec
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Status = PodDisruptionBudgetStatus{}
+ } else {
+ yyv12 := &x.Status
+ yyv12.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PodDisruptionBudget{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PodDisruptionBudget, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PodDisruptionBudget, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodDisruptionBudget{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PodDisruptionBudget{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodDisruptionBudget{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PodDisruptionBudget{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PodDisruptionBudget{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
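
An illustrative sketch, not part of the vendored diff, of how this codecgen output is exercised. The generated CodecEncodeSelf and CodecDecodeSelf methods make these types satisfy codec.Selfer, so the ugorji encoder takes the hand-written path instead of reflection; the sketch assumes the codec version vendored in this tree passes the GenVersion check in init():

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	pdb := policyv1alpha1.PodDisruptionBudget{
		Spec: policyv1alpha1.PodDisruptionBudgetSpec{
			MinAvailable: intstr.FromString("28%"),
		},
	}

	// Encode through the generated Selfer methods using a JSON handle.
	var out []byte
	if err := codec.NewEncoderBytes(&out, new(codec.JsonHandle)).Encode(&pdb); err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// Decode back through CodecDecodeSelf.
	var decoded policyv1alpha1.PodDisruptionBudget
	if err := codec.NewDecoderBytes(out, new(codec.JsonHandle)).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Spec.MinAvailable.String())
}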
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go
new file mode 100644
index 0000000..efe136f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/util/intstr"
+)
+
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
+type PodDisruptionBudgetSpec struct {
+ // The minimum number of pods that must be available simultaneously. This
+ // can be either an integer or a string specifying a percentage, e.g. "28%".
+ MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"`
+
+ // Label query over pods whose evictions are managed by the disruption
+ // budget.
+ Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
+}
+
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
+type PodDisruptionBudgetStatus struct {
+ // Whether or not a disruption is currently allowed.
+ PodDisruptionAllowed bool `json:"disruptionAllowed" protobuf:"varint,1,opt,name=disruptionAllowed"`
+
+ // current number of healthy pods
+ CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,2,opt,name=currentHealthy"`
+
+ // minimum desired number of healthy pods
+ DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,3,opt,name=desiredHealthy"`
+
+ // total number of pods counted by this disruption budget
+ ExpectedPods int32 `json:"expectedPods" protobuf:"varint,4,opt,name=expectedPods"`
+}
+
+// +genclient=true
+// +noMethods=true
+
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
+type PodDisruptionBudget struct {
+ unversioned.TypeMeta `json:",inline"`
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired behavior of the PodDisruptionBudget.
+ Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the PodDisruptionBudget.
+ Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
+type PodDisruptionBudgetList struct {
+ unversioned.TypeMeta `json:",inline"`
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
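
Because MinAvailable is an intstr.IntOrString, the same spec marshals to either a JSON number or a JSON string, matching the "integer or percentage" wording in the field comment. A small sketch with the standard library encoder, not part of the vendored file:

package main

import (
	"encoding/json"
	"fmt"

	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	byCount := policyv1alpha1.PodDisruptionBudgetSpec{MinAvailable: intstr.FromInt(2)}
	byPercent := policyv1alpha1.PodDisruptionBudgetSpec{MinAvailable: intstr.FromString("28%")}

	a, _ := json.Marshal(byCount)
	b, _ := json.Marshal(byPercent)
	fmt.Println(string(a)) // {"minAvailable":2}
	fmt.Println(string(b)) // {"minAvailable":"28%"}
}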
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..b12ce0f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_PodDisruptionBudget = map[string]string{
+ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
+ "spec": "Specification of the desired behavior of the PodDisruptionBudget.",
+ "status": "Most recently observed status of the PodDisruptionBudget.",
+}
+
+func (PodDisruptionBudget) SwaggerDoc() map[string]string {
+ return map_PodDisruptionBudget
+}
+
+var map_PodDisruptionBudgetList = map[string]string{
+ "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.",
+}
+
+func (PodDisruptionBudgetList) SwaggerDoc() map[string]string {
+ return map_PodDisruptionBudgetList
+}
+
+var map_PodDisruptionBudgetSpec = map[string]string{
+ "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.",
+ "minAvailable": "The minimum number of pods that must be available simultaneously. This can be either an integer or a string specifying a percentage, e.g. \"28%\".",
+ "selector": "Label query over pods whose evictions are managed by the disruption budget.",
+}
+
+func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
+ return map_PodDisruptionBudgetSpec
+}
+
+var map_PodDisruptionBudgetStatus = map[string]string{
+ "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.",
+ "disruptionAllowed": "Whether or not a disruption is currently allowed.",
+ "currentHealthy": "current number of healthy pods",
+ "desiredHealthy": "minimum desired number of healthy pods",
+ "expectedPods": "total number of pods counted by this disruption budget",
+}
+
+func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string {
+ return map_PodDisruptionBudgetStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
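
A sketch of how these generated maps are consumed. By convention the empty key documents the type itself and every other key documents one field; the swaggerDocer interface name below is introduced here for illustration and is not part of the vendored code:

package main

import (
	"fmt"

	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
)

// swaggerDocer matches the method every generated type above provides.
type swaggerDocer interface {
	SwaggerDoc() map[string]string
}

func describe(v swaggerDocer) {
	docs := v.SwaggerDoc()
	fmt.Println("type:", docs[""])
	for field, doc := range docs {
		if field != "" {
			fmt.Printf("  %s: %s\n", field, doc)
		}
	}
}

func main() {
	describe(policyv1alpha1.PodDisruptionBudgetSpec{})
	describe(policyv1alpha1.PodDisruptionBudgetStatus{})
}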
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go
new file mode 100644
index 0000000..10d8c40
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go
@@ -0,0 +1,241 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package rbac
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ runtime "k8s.io/kubernetes/pkg/runtime"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_rbac_ClusterRole,
+ DeepCopy_rbac_ClusterRoleBinding,
+ DeepCopy_rbac_ClusterRoleBindingList,
+ DeepCopy_rbac_ClusterRoleList,
+ DeepCopy_rbac_PolicyRule,
+ DeepCopy_rbac_Role,
+ DeepCopy_rbac_RoleBinding,
+ DeepCopy_rbac_RoleBindingList,
+ DeepCopy_rbac_RoleList,
+ DeepCopy_rbac_Subject,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_rbac_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(in))
+ for i := range in {
+ if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := in.Subjects, &out.Subjects
+ *out = make([]Subject, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Subjects = nil
+ }
+ out.RoleRef = in.RoleRef
+ return nil
+}
+
+func DeepCopy_rbac_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ClusterRoleBinding, len(in))
+ for i := range in {
+ if err := DeepCopy_rbac_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ClusterRole, len(in))
+ for i := range in {
+ if err := DeepCopy_rbac_ClusterRole(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error {
+ if in.Verbs != nil {
+ in, out := in.Verbs, &out.Verbs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Verbs = nil
+ }
+ if in.AttributeRestrictions == nil {
+ out.AttributeRestrictions = nil
+ } else if newVal, err := c.DeepCopy(in.AttributeRestrictions); err != nil {
+ return err
+ } else {
+ out.AttributeRestrictions = newVal.(runtime.Object)
+ }
+ if in.APIGroups != nil {
+ in, out := in.APIGroups, &out.APIGroups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.APIGroups = nil
+ }
+ if in.Resources != nil {
+ in, out := in.Resources, &out.Resources
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Resources = nil
+ }
+ if in.ResourceNames != nil {
+ in, out := in.ResourceNames, &out.ResourceNames
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.ResourceNames = nil
+ }
+ if in.NonResourceURLs != nil {
+ in, out := in.NonResourceURLs, &out.NonResourceURLs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.NonResourceURLs = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_Role(in Role, out *Role, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(in))
+ for i := range in {
+ if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := in.Subjects, &out.Subjects
+ *out = make([]Subject, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Subjects = nil
+ }
+ out.RoleRef = in.RoleRef
+ return nil
+}
+
+func DeepCopy_rbac_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]RoleBinding, len(in))
+ for i := range in {
+ if err := DeepCopy_rbac_RoleBinding(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Role, len(in))
+ for i := range in {
+ if err := DeepCopy_rbac_Role(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_rbac_Subject(in Subject, out *Subject, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.APIVersion = in.APIVersion
+ out.Name = in.Name
+ out.Namespace = in.Namespace
+ return nil
+}
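
An illustrative sketch of the generated deep-copy functions above, not part of the vendored diff. Passing a nil *conversion.Cloner is safe here only because AttributeRestrictions is nil, so the cloner is never consulted; real callers go through the copy functions registered on api.Scheme:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/rbac"
)

func main() {
	in := rbac.PolicyRule{
		Verbs:     []string{"get", "list"},
		APIGroups: []string{""},
		Resources: []string{"pods"},
	}

	// DeepCopy_rbac_PolicyRule allocates fresh slices for the copy.
	var out rbac.PolicyRule
	if err := rbac.DeepCopy_rbac_PolicyRule(in, &out, nil); err != nil {
		panic(err)
	}

	// Mutating the copy leaves the original untouched.
	out.Verbs[0] = "watch"
	fmt.Println(in.Verbs[0], out.Verbs[0]) // get watch
}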
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go
new file mode 100644
index 0000000..e4ce69b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package,register
+
+// +groupName=rbac.authorization.k8s.io
+package rbac
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go
new file mode 100644
index 0000000..0f6dc91
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the rbac API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/rbac"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
+
+func init() {
+ registered.RegisterVersions(availableVersions)
+ externalVersions := []unversioned.GroupVersion{}
+ for _, v := range availableVersions {
+ if registered.IsAllowedVersion(v) {
+ externalVersions = append(externalVersions, v)
+ }
+ }
+ if len(externalVersions) == 0 {
+ glog.V(4).Infof("No version is registered for group %v", rbac.GroupName)
+ return
+ }
+
+ if err := registered.EnableVersions(externalVersions...); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+ if err := enableVersions(externalVersions); err != nil {
+ glog.V(4).Infof("%v", err)
+ return
+ }
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+ addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0]
+
+ groupMeta := apimachinery.GroupMeta{
+ GroupVersion: preferredExternalVersion,
+ GroupVersions: externalVersions,
+ RESTMapper: newRESTMapper(externalVersions),
+ SelfLinker: runtime.SelfLinker(accessor),
+ InterfacesFor: interfacesFor,
+ }
+
+ if err := registered.RegisterGroup(groupMeta); err != nil {
+ return err
+ }
+ api.RegisterRESTMapper(groupMeta.RESTMapper)
+ return nil
+}
+
+func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
+ rootScoped := sets.NewString(
+ "ClusterRole",
+ "ClusterRoleBinding",
+ )
+
+ ignoredKinds := sets.NewString()
+
+ return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+}
+
+// interfacesFor returns the default Codec and ResourceVersioner for a given version
+// string, or an error if the version is not known.
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+ switch version {
+ case v1alpha1.SchemeGroupVersion:
+ return &meta.VersionInterfaces{
+ ObjectConvertor: api.Scheme,
+ MetadataAccessor: accessor,
+ }, nil
+ default:
+ g, _ := registered.Group(rbac.GroupName)
+ return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+ }
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+ // add the internal version to Scheme
+ rbac.AddToScheme(api.Scheme)
+ // add the enabled external versions to Scheme
+ for _, v := range externalVersions {
+ if !registered.IsEnabledVersion(v) {
+ glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+ continue
+ }
+ switch v {
+ case v1alpha1.SchemeGroupVersion:
+ v1alpha1.AddToScheme(api.Scheme)
+ }
+ }
+}
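
The usual way an install package like this is consumed is a blank import, which runs the init() above. The sketch below is illustrative and not part of the vendored diff; whether the alpha version reports as enabled depends on how the registered package treats KUBE_API_VERSIONS in this tree:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apimachinery/registered"

	// The blank import runs init(), registering the rbac group and enabling
	// its external versions with the API machinery.
	_ "k8s.io/kubernetes/pkg/apis/rbac/install"
)

func main() {
	gv := unversioned.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1alpha1"}
	fmt.Println(registered.IsEnabledVersion(gv))
}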
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go
new file mode 100644
index 0000000..5d89656
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rbac
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+const GroupName = "rbac.authorization.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) unversioned.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) unversioned.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ // Add the API to Scheme.
+ addKnownTypes(scheme)
+}
+
+// addKnownTypes adds the list of known rbac types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Role{},
+ &RoleBinding{},
+ &RoleBindingList{},
+ &RoleList{},
+
+ &ClusterRole{},
+ &ClusterRoleBinding{},
+ &ClusterRoleBindingList{},
+ &ClusterRoleList{},
+
+ &api.ListOptions{},
+ &api.DeleteOptions{},
+ &api.ExportOptions{},
+ )
+ versioned.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
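
register.go only qualifies names and registers the internal (unversioned) types; the versioned kinds are added by the v1alpha1 package later in this change. A small sketch of the Kind and Resource helpers, assuming the vendored k8s.io/kubernetes packages resolve on the build path:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/rbac"
)

func main() {
	// Kind and Resource qualify plain strings with the rbac group name,
	// which is what RESTMapper lookups and API errors expect.
	gk := rbac.Kind("ClusterRole")
	gr := rbac.Resource("clusterroles")
	fmt.Printf("kind=%s group=%s\n", gk.Kind, gk.Group)
	fmt.Printf("resource=%s group=%s\n", gr.Resource, gr.Group)
}
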
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go
new file mode 100644
index 0000000..44a3897
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rbac
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+const (
+ APIGroupAll = "*"
+ ResourceAll = "*"
+ VerbAll = "*"
+ NonResourceAll = "*"
+
+ GroupKind = "Group"
+ ServiceAccountKind = "ServiceAccount"
+ UserKind = "User"
+
+ UserAll = "*"
+)
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ Verbs []string
+ // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
+ AttributeRestrictions runtime.Object
+ // APIGroups is the name of the APIGroup that contains the resources.
+ // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.
+ APIGroups []string
+ // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ Resources []string
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string
+ // NonResourceURLs is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path.
+ // If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match.
+ // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+ NonResourceURLs []string
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+type Subject struct {
+ // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+ Kind string
+ // APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User" this is
+ // expected to be API version of this API group. For example "rbac/v1alpha1".
+ APIVersion string
+ // Name of the object being referenced.
+ Name string
+ // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+ // the Authorizer should report an error.
+ Namespace string
+}
+
+// +genclient=true
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ api.ObjectMeta
+
+ // Rules holds all the PolicyRules for this Role
+ Rules []PolicyRule
+}
+
+// +genclient=true
+
+// RoleBinding references a Role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds "who" information via Subjects and namespace ("where") information via the namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+ unversioned.TypeMeta
+ api.ObjectMeta
+
+ // Subjects holds references to the objects the role applies to.
+ Subjects []Subject
+
+ // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef api.ObjectReference
+}
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ unversioned.ListMeta
+
+ // Items is a list of roleBindings
+ Items []RoleBinding
+}
+
+// RoleList is a collection of Roles
+type RoleList struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ unversioned.ListMeta
+
+ // Items is a list of roles
+ Items []Role
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ api.ObjectMeta
+
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRule
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds "who" information via Subjects.
+type ClusterRoleBinding struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ api.ObjectMeta
+
+ // Subjects holds references to the objects the role applies to.
+ Subjects []Subject
+
+ // RoleRef can only reference a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef api.ObjectReference
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+type ClusterRoleBindingList struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ unversioned.ListMeta
+
+ // Items is a list of ClusterRoleBindings
+ Items []ClusterRoleBinding
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+type ClusterRoleList struct {
+ unversioned.TypeMeta
+ // Standard object's metadata.
+ unversioned.ListMeta
+
+ // Items is a list of ClusterRoles
+ Items []ClusterRole
+}
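
These are the internal (hub) representations; the versioned v1alpha1 structs added later in this change convert to and from them. A minimal sketch of the types in use (object names such as "read-only" are purely illustrative), assuming the vendored packages resolve:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/rbac"
)

func main() {
	// A cluster-scoped role granting read-only access to pods and services.
	readOnly := rbac.ClusterRole{
		ObjectMeta: api.ObjectMeta{Name: "read-only"},
		Rules: []rbac.PolicyRule{{
			Verbs:     []string{"get", "list", "watch"},
			APIGroups: []string{rbac.APIGroupAll},
			Resources: []string{"pods", "services"},
		}},
	}

	// Bind the role to every authenticated user via a Group subject.
	binding := rbac.ClusterRoleBinding{
		ObjectMeta: api.ObjectMeta{Name: "read-only-binding"},
		Subjects:   []rbac.Subject{{Kind: rbac.GroupKind, Name: "system:authenticated"}},
		RoleRef:    api.ObjectReference{Kind: "ClusterRole", Name: readOnly.Name},
	}

	fmt.Printf("%s -> %s (%d rule(s))\n", binding.Name, binding.RoleRef.Name, len(readOnly.Rules))
}
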
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go
new file mode 100644
index 0000000..5588146
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go
@@ -0,0 +1,536 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ rbac "k8s.io/kubernetes/pkg/apis/rbac"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ runtime "k8s.io/kubernetes/pkg/runtime"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedConversionFuncs(
+ Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole,
+ Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole,
+ Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding,
+ Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding,
+ Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList,
+ Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList,
+ Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList,
+ Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList,
+ Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule,
+ Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule,
+ Convert_v1alpha1_Role_To_rbac_Role,
+ Convert_rbac_Role_To_v1alpha1_Role,
+ Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding,
+ Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding,
+ Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList,
+ Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList,
+ Convert_v1alpha1_RoleList_To_rbac_RoleList,
+ Convert_rbac_RoleList_To_v1alpha1_RoleList,
+ Convert_v1alpha1_Subject_To_rbac_Subject,
+ Convert_rbac_Subject_To_v1alpha1_Subject,
+ ); err != nil {
+ // if one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]rbac.PolicyRule, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s)
+}
+
+func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error {
+ return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]rbac.Subject, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subjects = nil
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s)
+}
+
+func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]Subject, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subjects = nil
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error {
+ return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]rbac.ClusterRoleBinding, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s)
+}
+
+func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterRoleBinding, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error {
+ return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]rbac.ClusterRole, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s)
+}
+
+func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterRole, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error {
+ return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s)
+}
+
+func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
+ out.Verbs = in.Verbs
+ if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil {
+ return err
+ }
+ out.APIGroups = in.APIGroups
+ out.Resources = in.Resources
+ out.ResourceNames = in.ResourceNames
+ out.NonResourceURLs = in.NonResourceURLs
+ return nil
+}
+
+func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
+ return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s)
+}
+
+func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error {
+ out.Verbs = in.Verbs
+ if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil {
+ return err
+ }
+ out.APIGroups = in.APIGroups
+ out.Resources = in.Resources
+ out.ResourceNames = in.ResourceNames
+ out.NonResourceURLs = in.NonResourceURLs
+ return nil
+}
+
+func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error {
+ return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s)
+}
+
+func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]rbac.PolicyRule, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s)
+}
+
+func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error {
+ return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s)
+}
+
+func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]rbac.Subject, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subjects = nil
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
+ return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s)
+}
+
+func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]Subject, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subjects = nil
+ }
+ // TODO: Inefficient conversion - can we improve it?
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error {
+ return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s)
+}
+
+func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]rbac.RoleBinding, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s)
+}
+
+func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]RoleBinding, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error {
+ return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s)
+}
+
+func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]rbac.Role, len(*in))
+ for i := range *in {
+ if err := Convert_v1alpha1_Role_To_rbac_Role(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error {
+ return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s)
+}
+
+func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error {
+ if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+ return err
+ }
+ if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+ return err
+ }
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Role, len(*in))
+ for i := range *in {
+ if err := Convert_rbac_Role_To_v1alpha1_Role(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error {
+ return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s)
+}
+
+func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.APIVersion = in.APIVersion
+ out.Name = in.Name
+ out.Namespace = in.Namespace
+ return nil
+}
+
+func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error {
+ return autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s)
+}
+
+func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error {
+ out.Kind = in.Kind
+ out.APIVersion = in.APIVersion
+ out.Name = in.Name
+ out.Namespace = in.Namespace
+ return nil
+}
+
+func Convert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error {
+ return autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s)
+}
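
The init block registers every generated pair with api.Scheme, so conversions are normally reached through the scheme rather than called directly. A minimal sketch of a round trip, assuming the vendored packages resolve and relying on this package's init side effect:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/rbac"
	"k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// Convert a versioned subject into its internal representation; the
	// scheme dispatches to Convert_v1alpha1_Subject_To_rbac_Subject above.
	in := v1alpha1.Subject{Kind: "User", Name: "alice"}
	out := rbac.Subject{}
	if err := api.Scheme.Convert(&in, &out); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(out.Kind, out.Name)
}
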
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go
new file mode 100644
index 0000000..3f0e1eb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go
@@ -0,0 +1,238 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ conversion "k8s.io/kubernetes/pkg/conversion"
+ runtime "k8s.io/kubernetes/pkg/runtime"
+)
+
+func init() {
+ if err := api.Scheme.AddGeneratedDeepCopyFuncs(
+ DeepCopy_v1alpha1_ClusterRole,
+ DeepCopy_v1alpha1_ClusterRoleBinding,
+ DeepCopy_v1alpha1_ClusterRoleBindingList,
+ DeepCopy_v1alpha1_ClusterRoleList,
+ DeepCopy_v1alpha1_PolicyRule,
+ DeepCopy_v1alpha1_Role,
+ DeepCopy_v1alpha1_RoleBinding,
+ DeepCopy_v1alpha1_RoleBindingList,
+ DeepCopy_v1alpha1_RoleList,
+ DeepCopy_v1alpha1_Subject,
+ ); err != nil {
+ // if one of the deep copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
+
+func DeepCopy_v1alpha1_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := in.Subjects, &out.Subjects
+ *out = make([]Subject, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Subjects = nil
+ }
+ out.RoleRef = in.RoleRef
+ return nil
+}
+
+func DeepCopy_v1alpha1_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ClusterRoleBinding, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]ClusterRole, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_ClusterRole(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error {
+ if in.Verbs != nil {
+ in, out := in.Verbs, &out.Verbs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Verbs = nil
+ }
+ if err := runtime.DeepCopy_runtime_RawExtension(in.AttributeRestrictions, &out.AttributeRestrictions, c); err != nil {
+ return err
+ }
+ if in.APIGroups != nil {
+ in, out := in.APIGroups, &out.APIGroups
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.APIGroups = nil
+ }
+ if in.Resources != nil {
+ in, out := in.Resources, &out.Resources
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.Resources = nil
+ }
+ if in.ResourceNames != nil {
+ in, out := in.ResourceNames, &out.ResourceNames
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.ResourceNames = nil
+ }
+ if in.NonResourceURLs != nil {
+ in, out := in.NonResourceURLs, &out.NonResourceURLs
+ *out = make([]string, len(in))
+ copy(*out, in)
+ } else {
+ out.NonResourceURLs = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_Role(in Role, out *Role, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Rules != nil {
+ in, out := in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Rules = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+ return err
+ }
+ if in.Subjects != nil {
+ in, out := in.Subjects, &out.Subjects
+ *out = make([]Subject, len(in))
+ for i := range in {
+ (*out)[i] = in[i]
+ }
+ } else {
+ out.Subjects = nil
+ }
+ out.RoleRef = in.RoleRef
+ return nil
+}
+
+func DeepCopy_v1alpha1_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]RoleBinding, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_RoleBinding(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := in.Items, &out.Items
+ *out = make([]Role, len(in))
+ for i := range in {
+ if err := DeepCopy_v1alpha1_Role(in[i], &(*out)[i], c); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+func DeepCopy_v1alpha1_Subject(in Subject, out *Subject, c *conversion.Cloner) error {
+ out.Kind = in.Kind
+ out.APIVersion = in.APIVersion
+ out.Name = in.Name
+ out.Namespace = in.Namespace
+ return nil
+}
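
The deep-copy functions mirror the conversion set: they are registered with api.Scheme in init but can also be invoked directly with a conversion.Cloner. A minimal sketch, assuming the vendored packages resolve:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
	"k8s.io/kubernetes/pkg/conversion"
)

func main() {
	orig := v1alpha1.ClusterRole{
		Rules: []v1alpha1.PolicyRule{{Verbs: []string{"get"}}},
	}

	// Deep-copy into a fresh value; mutating the copy leaves the original intact
	// because slices such as Verbs are reallocated by the generated function.
	var copied v1alpha1.ClusterRole
	if err := v1alpha1.DeepCopy_v1alpha1_ClusterRole(orig, &copied, conversion.NewCloner()); err != nil {
		fmt.Println("deep copy failed:", err)
		return
	}
	copied.Rules[0].Verbs[0] = "delete"
	fmt.Println(orig.Rules[0].Verbs[0], copied.Rules[0].Verbs[0]) // get delete
}
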
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go
new file mode 100644
index 0000000..e471bd3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +groupName=rbac.authorization.k8s.io
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/rbac
+
+package v1alpha1
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go
new file mode 100644
index 0000000..fb917c3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go
@@ -0,0 +1,2209 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package v1alpha1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto
+
+ It has these top-level messages:
+ ClusterRole
+ ClusterRoleBinding
+ ClusterRoleBindingList
+ ClusterRoleList
+ PolicyRule
+ Role
+ RoleBinding
+ RoleBindingList
+ RoleList
+ Subject
+*/
+package v1alpha1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *ClusterRole) Reset() { *m = ClusterRole{} }
+func (m *ClusterRole) String() string { return proto.CompactTextString(m) }
+func (*ClusterRole) ProtoMessage() {}
+
+func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
+func (m *ClusterRoleBinding) String() string { return proto.CompactTextString(m) }
+func (*ClusterRoleBinding) ProtoMessage() {}
+
+func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
+func (m *ClusterRoleBindingList) String() string { return proto.CompactTextString(m) }
+func (*ClusterRoleBindingList) ProtoMessage() {}
+
+func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
+func (m *ClusterRoleList) String() string { return proto.CompactTextString(m) }
+func (*ClusterRoleList) ProtoMessage() {}
+
+func (m *PolicyRule) Reset() { *m = PolicyRule{} }
+func (m *PolicyRule) String() string { return proto.CompactTextString(m) }
+func (*PolicyRule) ProtoMessage() {}
+
+func (m *Role) Reset() { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage() {}
+
+func (m *RoleBinding) Reset() { *m = RoleBinding{} }
+func (m *RoleBinding) String() string { return proto.CompactTextString(m) }
+func (*RoleBinding) ProtoMessage() {}
+
+func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
+func (m *RoleBindingList) String() string { return proto.CompactTextString(m) }
+func (*RoleBindingList) ProtoMessage() {}
+
+func (m *RoleList) Reset() { *m = RoleList{} }
+func (m *RoleList) String() string { return proto.CompactTextString(m) }
+func (*RoleList) ProtoMessage() {}
+
+func (m *Subject) Reset() { *m = Subject{} }
+func (m *Subject) String() string { return proto.CompactTextString(m) }
+func (*Subject) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*ClusterRole)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole")
+ proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding")
+ proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList")
+ proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList")
+ proto.RegisterType((*PolicyRule)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule")
+ proto.RegisterType((*Role)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Role")
+ proto.RegisterType((*RoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding")
+ proto.RegisterType((*RoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList")
+ proto.RegisterType((*RoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleList")
+ proto.RegisterType((*Subject)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Subject")
+}
+func (m *ClusterRole) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ClusterRole) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n1, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ if len(m.Rules) > 0 {
+ for _, msg := range m.Rules {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ClusterRoleBinding) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n2, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ if len(m.Subjects) > 0 {
+ for _, msg := range m.Subjects {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size()))
+ n3, err := m.RoleRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ return i, nil
+}
+
+func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n4, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *ClusterRoleList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n5, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *PolicyRule) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *PolicyRule) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Verbs) > 0 {
+ for _, s := range m.Verbs {
+ data[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.AttributeRestrictions.Size()))
+ n6, err := m.AttributeRestrictions.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ if len(m.APIGroups) > 0 {
+ for _, s := range m.APIGroups {
+ data[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.Resources) > 0 {
+ for _, s := range m.Resources {
+ data[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.ResourceNames) > 0 {
+ for _, s := range m.ResourceNames {
+ data[i] = 0x2a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ if len(m.NonResourceURLs) > 0 {
+ for _, s := range m.NonResourceURLs {
+ data[i] = 0x32
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ data[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ data[i] = uint8(l)
+ i++
+ i += copy(data[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *Role) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Role) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n7, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ if len(m.Rules) > 0 {
+ for _, msg := range m.Rules {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *RoleBinding) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RoleBinding) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size()))
+ n8, err := m.ObjectMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ if len(m.Subjects) > 0 {
+ for _, msg := range m.Subjects {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size()))
+ n9, err := m.RoleRef.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ return i, nil
+}
+
+func (m *RoleBindingList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RoleBindingList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n10, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *RoleList) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RoleList) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+ n11, err := m.ListMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ if len(m.Items) > 0 {
+ for _, msg := range m.Items {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Subject) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Subject) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
+ i += copy(data[i:], m.Name)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Namespace)))
+ i += copy(data[i:], m.Namespace)
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *ClusterRole) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterRoleBinding) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Subjects) > 0 {
+ for _, e := range m.Subjects {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RoleRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ClusterRoleBindingList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterRoleList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PolicyRule) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Verbs) > 0 {
+ for _, s := range m.Verbs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.AttributeRestrictions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.APIGroups) > 0 {
+ for _, s := range m.APIGroups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Resources) > 0 {
+ for _, s := range m.Resources {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ResourceNames) > 0 {
+ for _, s := range m.ResourceNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.NonResourceURLs) > 0 {
+ for _, s := range m.NonResourceURLs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Role) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RoleBinding) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Subjects) > 0 {
+ for _, e := range m.Subjects {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RoleRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RoleBindingList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RoleList) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Subject) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ClusterRole) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, PolicyRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRoleBinding) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subjects = append(m.Subjects, Subject{})
+ if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRoleBindingList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterRoleBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRoleList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterRole{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PolicyRule) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.AttributeRestrictions.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Role) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Role: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, PolicyRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBinding) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subjects = append(m.Subjects, Subject{})
+ if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBindingList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, RoleBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleList) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Role{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Subject) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Subject: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto
new file mode 100644
index 0000000..062f815
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto
@@ -0,0 +1,159 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+message ClusterRole {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Rules holds all the PolicyRules for this ClusterRole
+ repeated PolicyRule rules = 2;
+}
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+message ClusterRoleBinding {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Subjects holds references to the objects the role applies to.
+ repeated Subject subjects = 2;
+
+ // RoleRef can only reference a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3;
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+message ClusterRoleBindingList {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of ClusterRoleBindings
+ repeated ClusterRoleBinding items = 2;
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+message ClusterRoleList {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of ClusterRoles
+ repeated ClusterRole items = 2;
+}
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+message PolicyRule {
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ repeated string verbs = 1;
+
+ // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
+ optional k8s.io.kubernetes.pkg.runtime.RawExtension attributeRestrictions = 2;
+
+ // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+ // the enumerated resources in any API group will be allowed.
+ repeated string apiGroups = 3;
+
+ // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ repeated string resources = 4;
+
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ repeated string resourceNames = 5;
+
+ // NonResourceURLsSlice is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path.
+ // This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+ // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+ repeated string nonResourceURLs = 6;
+}
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+message Role {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Rules holds all the PolicyRules for this Role
+ repeated PolicyRule rules = 2;
+}
+
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information via the namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+message RoleBinding {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+ // Subjects holds references to the objects the role applies to.
+ repeated Subject subjects = 2;
+
+ // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3;
+}
+
+// RoleBindingList is a collection of RoleBindings
+message RoleBindingList {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of RoleBindings
+ repeated RoleBinding items = 2;
+}
+
+// RoleList is a collection of Roles
+message RoleList {
+ // Standard object's metadata.
+ optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+ // Items is a list of Roles
+ repeated Role items = 2;
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+message Subject {
+ // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+ optional string kind = 1;
+
+ // APIVersion holds the API group and version of the referenced object.
+ optional string apiVersion = 2;
+
+ // Name of the object being referenced.
+ optional string name = 3;
+
+ // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+ // the Authorizer should report an error.
+ optional string namespace = 4;
+}
+
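
To make the message definitions above concrete, here is a hedged sketch of populating the corresponding Go types, assuming the vendored packages k8s.io/kubernetes/pkg/api/v1 and k8s.io/kubernetes/pkg/apis/rbac/v1alpha1 and the field names seen in the generated Unmarshal code; the "pod-reader" role and its rule are sample data, not taken from this repository.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// A cluster-scoped role whose single PolicyRule grants read-only verbs on
	// pods in the core ("") API group.
	role := rbacv1alpha1.ClusterRole{
		ObjectMeta: v1.ObjectMeta{Name: "pod-reader"},
		Rules: []rbacv1alpha1.PolicyRule{{
			Verbs:     []string{"get", "list", "watch"},
			APIGroups: []string{""},
			Resources: []string{"pods"},
		}},
	}
	fmt.Printf("%s: %v on %v\n", role.ObjectMeta.Name, role.Rules[0].Verbs, role.Rules[0].Resources)
}
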
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go
new file mode 100644
index 0000000..cff4720
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// SchemeGroupVersion is the group version used to register these objects.
+var SchemeGroupVersion = unversioned.GroupVersion{Group: rbac.GroupName, Version: "v1alpha1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+ addKnownTypes(scheme)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Role{},
+ &RoleBinding{},
+ &RoleBindingList{},
+ &RoleList{},
+
+ &ClusterRole{},
+ &ClusterRoleBinding{},
+ &ClusterRoleBindingList{},
+ &ClusterRoleList{},
+
+ &v1.ListOptions{},
+ &v1.DeleteOptions{},
+ &v1.ExportOptions{},
+ )
+ versioned.AddToGroupVersion(scheme, SchemeGroupVersion)
+}
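
As a usage note, a caller would typically hand AddToScheme a runtime.Scheme so the kinds above become decodable. A minimal sketch, assuming the vendored import paths used in this tree:

package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// AddToScheme registers the Role/RoleBinding/ClusterRole/ClusterRoleBinding
	// kinds (and their List types) plus the shared v1 options types, exactly as
	// addKnownTypes above enumerates them.
	scheme := runtime.NewScheme()
	rbacv1alpha1.AddToScheme(scheme)

	fmt.Println("registered group version:", rbacv1alpha1.SchemeGroupVersion)
}
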
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go
new file mode 100644
index 0000000..bf7b3db
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go
@@ -0,0 +1,4327 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package v1alpha1
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ pkg3_v1 "k8s.io/kubernetes/pkg/api/v1"
+ pkg1_runtime "k8s.io/kubernetes/pkg/runtime"
+ pkg4_types "k8s.io/kubernetes/pkg/types"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81234 = 1
+ codecSelferC_RAW1234 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1234 = 10
+ codecSelferValueTypeMap1234 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1234 = 2
+ codecSelfer_containerMapValue1234 = 3
+ codecSelfer_containerMapEnd1234 = 4
+ codecSelfer_containerArrayElem1234 = 6
+ codecSelfer_containerArrayEnd1234 = 7
+)
+
+var (
+ codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1234 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg2_unversioned.TypeMeta
+ var v1 pkg3_v1.ObjectMeta
+ var v2 pkg1_runtime.RawExtension
+ var v3 pkg4_types.UID
+ var v4 time.Time
+ _, _, _, _, _ = v0, v1, v2, v3, v4
+ }
+}
+
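
The CodecEncodeSelf/CodecDecodeSelf methods that follow are called by the ugorji codec whenever a value implements its Selfer interface. A minimal round-trip sketch, assuming only the vendored github.com/ugorji/go/codec and k8s.io/kubernetes/pkg/apis/rbac/v1alpha1 import paths (the Subject literal is sample data):

package main

import (
	"fmt"

	codec "github.com/ugorji/go/codec"
	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	in := rbacv1alpha1.Subject{Kind: "User", Name: "jane"}

	// Encode: the codec detects that *Subject implements codec.Selfer and
	// dispatches to the generated CodecEncodeSelf in this file.
	var buf []byte
	h := &codec.JsonHandle{}
	if err := codec.NewEncoderBytes(&buf, h).Encode(&in); err != nil {
		panic(err)
	}

	// Decode back through the generated CodecDecodeSelf.
	var out rbacv1alpha1.Subject
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(string(buf), out.Kind, out.Name)
}
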
+func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [6]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = true
+ yyq2[4] = len(x.ResourceNames) != 0
+ yyq2[5] = len(x.NonResourceURLs) != 0
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Verbs == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Verbs, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("verbs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Verbs == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Verbs, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yy7 := &x.AttributeRestrictions
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy7) {
+ } else if !yym8 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy7)
+ } else {
+ z.EncFallback(yy7)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("attributeRestrictions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy9 := &x.AttributeRestrictions
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy9) {
+ } else if !yym10 && z.IsJSONHandle() {
+ z.EncJSONMarshal(yy9)
+ } else {
+ z.EncFallback(yy9)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.APIGroups == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.APIGroups, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiGroups"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.APIGroups == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.APIGroups, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Resources == nil {
+ r.EncodeNil()
+ } else {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Resources, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resources"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Resources == nil {
+ r.EncodeNil()
+ } else {
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.Resources, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ if x.ResourceNames == nil {
+ r.EncodeNil()
+ } else {
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.ResourceNames, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("resourceNames"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.ResourceNames == nil {
+ r.EncodeNil()
+ } else {
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.ResourceNames, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[5] {
+ if x.NonResourceURLs == nil {
+ r.EncodeNil()
+ } else {
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.NonResourceURLs, false, e)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.NonResourceURLs == nil {
+ r.EncodeNil()
+ } else {
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ z.F.EncSliceStringV(x.NonResourceURLs, false, e)
+ }
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "verbs":
+ if r.TryDecodeAsNil() {
+ x.Verbs = nil
+ } else {
+ yyv4 := &x.Verbs
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv4, false, d)
+ }
+ }
+ case "attributeRestrictions":
+ if r.TryDecodeAsNil() {
+ x.AttributeRestrictions = pkg1_runtime.RawExtension{}
+ } else {
+ yyv6 := &x.AttributeRestrictions
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv6) {
+ } else if !yym7 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv6)
+ } else {
+ z.DecFallback(yyv6, false)
+ }
+ }
+ case "apiGroups":
+ if r.TryDecodeAsNil() {
+ x.APIGroups = nil
+ } else {
+ yyv8 := &x.APIGroups
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv8, false, d)
+ }
+ }
+ case "resources":
+ if r.TryDecodeAsNil() {
+ x.Resources = nil
+ } else {
+ yyv10 := &x.Resources
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv10, false, d)
+ }
+ }
+ case "resourceNames":
+ if r.TryDecodeAsNil() {
+ x.ResourceNames = nil
+ } else {
+ yyv12 := &x.ResourceNames
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv12, false, d)
+ }
+ }
+ case "nonResourceURLs":
+ if r.TryDecodeAsNil() {
+ x.NonResourceURLs = nil
+ } else {
+ yyv14 := &x.NonResourceURLs
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv14, false, d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj16 int
+ var yyb16 bool
+ var yyhl16 bool = l >= 0
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Verbs = nil
+ } else {
+ yyv17 := &x.Verbs
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv17, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.AttributeRestrictions = pkg1_runtime.RawExtension{}
+ } else {
+ yyv19 := &x.AttributeRestrictions
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv19) {
+ } else if !yym20 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(yyv19)
+ } else {
+ z.DecFallback(yyv19, false)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIGroups = nil
+ } else {
+ yyv21 := &x.APIGroups
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv21, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Resources = nil
+ } else {
+ yyv23 := &x.Resources
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv23, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ResourceNames = nil
+ } else {
+ yyv25 := &x.ResourceNames
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv25, false, d)
+ }
+ }
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.NonResourceURLs = nil
+ } else {
+ yyv27 := &x.NonResourceURLs
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ z.F.DecSliceStringX(yyv27, false, d)
+ }
+ }
+ for {
+ yyj16++
+ if yyhl16 {
+ yyb16 = yyj16 > l
+ } else {
+ yyb16 = r.CheckBreak()
+ }
+ if yyb16 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj16-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[1] = x.APIVersion != ""
+ yyq2[3] = x.Namespace != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[1] {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("namespace"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Namespace))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ case "namespace":
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ x.Name = string(r.DecodeString())
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Namespace = ""
+ } else {
+ x.Namespace = string(r.DecodeString())
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rules"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
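+// CodecDecodeSelf dispatches on the wire container type: a map is handed to
+// codecDecodeSelfFromMap, which switches on the JSON field name ("metadata",
+// "rules", "kind", "apiVersion") and routes unknown keys to
+// DecStructFieldNotFound, while an array is handed to
+// codecDecodeSelfFromArray, which reads the same fields positionally. Any
+// other container type panics with codecSelferOnlyMapOrArrayEncodeToStructErr1234.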
+func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "rules":
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv5 := &x.Rules
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePolicyRule((*[]PolicyRule)(yyv5), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv11 := &x.Rules
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSlicePolicyRule((*[]PolicyRule)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Subjects == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceSubject(([]Subject)(x.Subjects), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subjects"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Subjects == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceSubject(([]Subject)(x.Subjects), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy12 := &x.RoleRef
+ yy12.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("roleRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.RoleRef
+ yy14.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "subjects":
+ if r.TryDecodeAsNil() {
+ x.Subjects = nil
+ } else {
+ yyv5 := &x.Subjects
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceSubject((*[]Subject)(yyv5), d)
+ }
+ }
+ case "roleRef":
+ if r.TryDecodeAsNil() {
+ x.RoleRef = pkg3_v1.ObjectReference{}
+ } else {
+ yyv7 := &x.RoleRef
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subjects = nil
+ } else {
+ yyv12 := &x.Subjects
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.decSliceSubject((*[]Subject)(yyv12), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RoleRef = pkg3_v1.ObjectReference{}
+ } else {
+ yyv14 := &x.RoleRef
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
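+// The *List types (RoleBindingList, RoleList, ClusterRoleBindingList,
+// ClusterRoleList) differ from the item types above in two ways: ListMeta is
+// encoded and decoded through the extension hook or z.EncFallback/z.DecFallback
+// rather than a generated CodecEncodeSelf, and Items is delegated to per-type
+// slice helpers such as encSliceRoleBinding/decSliceRoleBinding.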
+func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceRoleBinding(([]RoleBinding)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceRoleBinding(([]RoleBinding)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceRoleBinding((*[]RoleBinding)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceRoleBinding((*[]RoleBinding)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceRole(([]Role)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceRole(([]Role)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceRole((*[]Role)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceRole((*[]Role)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
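+// ClusterRole, ClusterRoleBinding, ClusterRoleBindingList and ClusterRoleList
+// repeat the same generated encode/decode pattern as their namespaced
+// counterparts above, differing only in the type names and in which slice
+// helpers they call.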
+func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("rules"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Rules == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "rules":
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv5 := &x.Rules
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSlicePolicyRule((*[]PolicyRule)(yyv5), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj9 int
+ var yyb9 bool
+ var yyhl9 bool = l >= 0
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv10 := &x.ObjectMeta
+ yyv10.CodecDecodeSelf(d)
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Rules = nil
+ } else {
+ yyv11 := &x.Rules
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSlicePolicyRule((*[]PolicyRule)(yyv11), d)
+ }
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj9++
+ if yyhl9 {
+ yyb9 = yyj9 > l
+ } else {
+ yyb9 = r.CheckBreak()
+ }
+ if yyb9 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj9-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [5]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[3] = x.Kind != ""
+ yyq2[4] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(5)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ObjectMeta
+ yy4.CodecEncodeSelf(e)
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ObjectMeta
+ yy6.CodecEncodeSelf(e)
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Subjects == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceSubject(([]Subject)(x.Subjects), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("subjects"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Subjects == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceSubject(([]Subject)(x.Subjects), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy12 := &x.RoleRef
+ yy12.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("roleRef"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy14 := &x.RoleRef
+ yy14.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym18 := z.EncBinary()
+ _ = yym18
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[4] {
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym21 := z.EncBinary()
+ _ = yym21
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv4 := &x.ObjectMeta
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "subjects":
+ if r.TryDecodeAsNil() {
+ x.Subjects = nil
+ } else {
+ yyv5 := &x.Subjects
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceSubject((*[]Subject)(yyv5), d)
+ }
+ }
+ case "roleRef":
+ if r.TryDecodeAsNil() {
+ x.RoleRef = pkg3_v1.ObjectReference{}
+ } else {
+ yyv7 := &x.RoleRef
+ yyv7.CodecDecodeSelf(d)
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ObjectMeta = pkg3_v1.ObjectMeta{}
+ } else {
+ yyv11 := &x.ObjectMeta
+ yyv11.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Subjects = nil
+ } else {
+ yyv12 := &x.Subjects
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ h.decSliceSubject((*[]Subject)(yyv12), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.RoleRef = pkg3_v1.ObjectReference{}
+ } else {
+ yyv14 := &x.RoleRef
+ yyv14.CodecDecodeSelf(d)
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [4]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ yyq2[0] = true
+ yyq2[2] = x.Kind != ""
+ yyq2[3] = x.APIVersion != ""
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(4)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[0] {
+ yy4 := &x.ListMeta
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy4) {
+ } else {
+ z.EncFallback(yy4)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq2[0] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yy6 := &x.ListMeta
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else if z.HasExtensions() && z.EncExt(yy6) {
+ } else {
+ z.EncFallback(yy6)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceClusterRole(([]ClusterRole)(x.Items), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("items"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ if x.Items == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceClusterRole(([]ClusterRole)(x.Items), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[2] {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("kind"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ if yyq2[3] {
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, "")
+ }
+ } else {
+ if yyq2[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1234)
+ r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1234)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234)
+ }
+ }
+ }
+}
+
+func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap1234 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray1234 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
+ }
+ }
+}
+
+func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1234)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3 := string(yys3Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1234)
+ switch yys3 {
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv4 := &x.ListMeta
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv4) {
+ } else {
+ z.DecFallback(yyv4, false)
+ }
+ }
+ case "items":
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv6 := &x.Items
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSliceClusterRole((*[]ClusterRole)(yyv6), d)
+ }
+ }
+ case "kind":
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ case "apiVersion":
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234)
+}
+
+func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.ListMeta = pkg2_unversioned.ListMeta{}
+ } else {
+ yyv11 := &x.ListMeta
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv11) {
+ } else {
+ z.DecFallback(yyv11, false)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Items = nil
+ } else {
+ yyv13 := &x.Items
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSliceClusterRole((*[]ClusterRole)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.Kind = ""
+ } else {
+ x.Kind = string(r.DecodeString())
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ if r.TryDecodeAsNil() {
+ x.APIVersion = ""
+ } else {
+ x.APIVersion = string(r.DecodeString())
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []PolicyRule{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 160)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]PolicyRule, yyrl1)
+ }
+ } else {
+ yyv1 = make([]PolicyRule, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PolicyRule{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, PolicyRule{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PolicyRule{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = PolicyRule{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []PolicyRule{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Subject{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Subject, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Subject, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Subject{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Subject{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Subject{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Subject{}) // var yyz1 Subject
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Subject{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Subject{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []RoleBinding{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]RoleBinding, yyrl1)
+ }
+ } else {
+ yyv1 = make([]RoleBinding, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = RoleBinding{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, RoleBinding{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = RoleBinding{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = RoleBinding{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []RoleBinding{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Role{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Role, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Role, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Role{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Role{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Role{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Role{}) // var yyz1 Role
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Role{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Role{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ClusterRoleBinding{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ClusterRoleBinding, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ClusterRoleBinding, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ClusterRoleBinding{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ClusterRoleBinding{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ClusterRoleBinding{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ClusterRoleBinding{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ClusterRoleBinding{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
+}
+
+func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) {
+ var h codecSelfer1234
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []ClusterRole{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]ClusterRole, yyrl1)
+ }
+ } else {
+ yyv1 = make([]ClusterRole, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ClusterRole{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, ClusterRole{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ClusterRole{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = ClusterRole{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []ClusterRole{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
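
The generated encode/decode methods above are not called directly; they implement the ugorji codec Selfer hooks and are dispatched to by a codec Encoder or Decoder. Below is a minimal round-trip sketch for ClusterRoleList, assuming the vendored github.com/ugorji/go/codec package and an ordinary JSON handle; the sample data is illustrative and not taken from kube2msb.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"

	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// Because ClusterRoleList implements the codec Selfer interface, the
	// Encoder/Decoder below call the generated CodecEncodeSelf and
	// CodecDecodeSelf methods instead of falling back to reflection.
	original := rbac.ClusterRoleList{
		Items: []rbac.ClusterRole{{
			Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list"}, Resources: []string{"pods"}}},
		}},
	}

	handle := new(codec.JsonHandle)

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, handle).Encode(&original); err != nil {
		panic(err)
	}

	var decoded rbac.ClusterRoleList
	if err := codec.NewDecoderBytes(buf, handle).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Println(len(decoded.Items)) // 1
}
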
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go
new file mode 100644
index 0000000..a260fac
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+ // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
+ AttributeRestrictions runtime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"`
+ // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+ // the enumerated resources in any API group will be allowed.
+ APIGroups []string `json:"apiGroups" protobuf:"bytes,3,rep,name=apiGroups"`
+ // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"`
+ // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
+ // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+ // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+ NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"`
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+type Subject struct {
+ // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+ Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // APIVersion holds the API group and version of the referenced object.
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+ // Name of the object being referenced.
+ Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+ // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
+ // the Authorizer should report an error.
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
+}
+
+// +genclient=true
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Rules holds all the PolicyRules for this Role
+ Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient=true
+
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Subjects holds references to the objects the role applies to.
+ Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"`
+
+ // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of RoleBindings
+ Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// RoleList is a collection of Roles
+type RoleList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of Roles
+ Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// +genclient=true
+// +nonNamespaced=true
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+type ClusterRoleBinding struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Subjects holds references to the objects the role applies to.
+ Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"`
+
+ // RoleRef can only reference a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+type ClusterRoleBindingList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of ClusterRoleBindings
+ Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+type ClusterRoleList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of ClusterRoles
+ Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
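
To show how these types fit together, here is a minimal sketch of a namespaced Role that allows reading pods and a RoleBinding that grants it to a ServiceAccount; the names, namespace, and RoleRef values are illustrative only.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// A Role is a set of PolicyRules scoped to a namespace.
	role := rbac.Role{
		ObjectMeta: v1.ObjectMeta{Name: "pod-reader", Namespace: "default"},
		Rules: []rbac.PolicyRule{{
			Verbs:     []string{"get", "list", "watch"},
			APIGroups: []string{""}, // "" is the core API group
			Resources: []string{"pods"},
		}},
	}

	// A RoleBinding attaches that Role to one or more Subjects in the same namespace.
	binding := rbac.RoleBinding{
		ObjectMeta: v1.ObjectMeta{Name: "read-pods", Namespace: "default"},
		Subjects: []rbac.Subject{{
			Kind:      "ServiceAccount",
			Name:      "kube2msb",
			Namespace: "default",
		}},
		RoleRef: v1.ObjectReference{Kind: "Role", Namespace: role.Namespace, Name: role.Name},
	}

	fmt.Printf("%s grants %v on %v to %s\n",
		binding.Name, role.Rules[0].Verbs, role.Rules[0].Resources, binding.Subjects[0].Name)
}
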
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..f1c6fdb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ClusterRole = map[string]string{
+ "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
+ "metadata": "Standard object's metadata.",
+ "rules": "Rules holds all the PolicyRules for this ClusterRole",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+ return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
+ "metadata": "Standard object's metadata.",
+ "subjects": "Subjects holds references to the objects the role applies to.",
+ "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (ClusterRoleBinding) SwaggerDoc() map[string]string {
+ return map_ClusterRoleBinding
+}
+
+var map_ClusterRoleBindingList = map[string]string{
+ "": "ClusterRoleBindingList is a collection of ClusterRoleBindings",
+ "metadata": "Standard object's metadata.",
+ "items": "Items is a list of ClusterRoleBindings",
+}
+
+func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
+ return map_ClusterRoleBindingList
+}
+
+var map_ClusterRoleList = map[string]string{
+ "": "ClusterRoleList is a collection of ClusterRoles",
+ "metadata": "Standard object's metadata.",
+ "items": "Items is a list of ClusterRoles",
+}
+
+func (ClusterRoleList) SwaggerDoc() map[string]string {
+ return map_ClusterRoleList
+}
+
+var map_PolicyRule = map[string]string{
+ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
+ "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.",
+ "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
+ "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
+ "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
+ "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+ return map_PolicyRule
+}
+
+var map_Role = map[string]string{
+ "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
+ "metadata": "Standard object's metadata.",
+ "rules": "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+ return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+ "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
+ "metadata": "Standard object's metadata.",
+ "subjects": "Subjects holds references to the objects the role applies to.",
+ "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+}
+
+func (RoleBinding) SwaggerDoc() map[string]string {
+ return map_RoleBinding
+}
+
+var map_RoleBindingList = map[string]string{
+ "": "RoleBindingList is a collection of RoleBindings",
+ "metadata": "Standard object's metadata.",
+ "items": "Items is a list of RoleBindings",
+}
+
+func (RoleBindingList) SwaggerDoc() map[string]string {
+ return map_RoleBindingList
+}
+
+var map_RoleList = map[string]string{
+ "": "RoleList is a collection of Roles",
+ "metadata": "Standard object's metadata.",
+ "items": "Items is a list of Roles",
+}
+
+func (RoleList) SwaggerDoc() map[string]string {
+ return map_RoleList
+}
+
+var map_Subject = map[string]string{
+ "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.",
+ "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.",
+ "apiVersion": "APIVersion holds the API group and version of the referenced object.",
+ "name": "Name of the object being referenced.",
+ "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.",
+}
+
+func (Subject) SwaggerDoc() map[string]string {
+ return map_Subject
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
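
These generated maps are what go-restful consumes when building Swagger models, but they can also be read directly. A small sketch that looks up the documentation for PolicyRule; the field name is just an example.

package main

import (
	"fmt"

	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// SwaggerDoc returns the field-name -> description map defined above;
	// the empty key holds the description of the type itself.
	docs := rbac.PolicyRule{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["verbs"])
}
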
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go
new file mode 100644
index 0000000..570c51a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package user contains utilities for dealing with simple user exchange in the auth
+// packages. The user.Info interface defines an interface for exchanging that info.
+package user
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/user.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/user.go
new file mode 100644
index 0000000..9926196
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/auth/user/user.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+// Info describes a user that has been authenticated to the system.
+type Info interface {
+ // GetName returns the name that uniquely identifies this user among all
+ // other active users.
+ GetName() string
+ // GetUID returns a unique value for a particular user that will change
+ // if the user is removed from the system and another user is added with
+ // the same name.
+ GetUID() string
+ // GetGroups returns the names of the groups the user is a member of
+ GetGroups() []string
+
+ // GetExtra can contain any additional information that the authenticator
+ // thought was interesting. One example would be scopes on a token.
+ // Keys in this map should be namespaced to the authenticator or
+ // authenticator/authorizer pair making use of them.
+ // For instance: "example.org/foo" instead of "foo"
+ // This is a map[string][]string because it needs to be serializable into
+ // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization
+ // delegation flows
+ GetExtra() map[string][]string
+}
+
+// DefaultInfo provides a simple user information exchange object
+// for components that implement the UserInfo interface.
+type DefaultInfo struct {
+ Name string
+ UID string
+ Groups []string
+ Extra map[string][]string
+}
+
+func (i *DefaultInfo) GetName() string {
+ return i.Name
+}
+
+func (i *DefaultInfo) GetUID() string {
+ return i.UID
+}
+
+func (i *DefaultInfo) GetGroups() []string {
+ return i.Groups
+}
+
+func (i *DefaultInfo) GetExtra() map[string][]string {
+ return i.Extra
+}
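
The interface is deliberately small; the following sketch shows an authenticator returning a DefaultInfo and a caller reading it back through user.Info. The identity, groups, and extra key are invented for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/auth/user"
)

// authenticate stands in for an authenticator that has resolved a request to
// a concrete identity. DefaultInfo satisfies the user.Info interface.
func authenticate() user.Info {
	return &user.DefaultInfo{
		Name:   "system:serviceaccount:default:kube2msb",
		UID:    "5c6a7b1e",
		Groups: []string{"system:serviceaccounts", "system:authenticated"},
		Extra:  map[string][]string{"example.org/scopes": {"read"}},
	}
}

func main() {
	u := authenticate()
	fmt.Println(u.GetName(), u.GetGroups(), u.GetExtra())
}
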
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go
new file mode 100644
index 0000000..96146c6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package capabilities
+
+import (
+ "sync"
+)
+
+// Capabilities defines the set of capabilities available within the system.
+// For now these are global. Eventually they may be per-user
+type Capabilities struct {
+ AllowPrivileged bool
+
+ // Pod sources from which to allow privileged capabilities like host networking, sharing the host
+ // IPC namespace, and sharing the host PID namespace.
+ PrivilegedSources PrivilegedSources
+
+ // PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach)
+ PerConnectionBandwidthLimitBytesPerSec int64
+}
+
+// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types
+// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace.
+type PrivilegedSources struct {
+ // List of pod sources for which using host network is allowed.
+ HostNetworkSources []string
+
+ // List of pod sources for which using host pid namespace is allowed.
+ HostPIDSources []string
+
+ // List of pod sources for which using host ipc is allowed.
+ HostIPCSources []string
+}
+
+// TODO: Clean these up into a singleton
+var once sync.Once
+var lock sync.Mutex
+var capabilities *Capabilities
+
+// Initialize the capability set. This can only be done once per binary; subsequent calls are ignored.
+func Initialize(c Capabilities) {
+ // Only do this once
+ once.Do(func() {
+ capabilities = &c
+ })
+}
+
+// Setup initializes the capability set. It wraps Initialize to improve usability.
+func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) {
+ Initialize(Capabilities{
+ AllowPrivileged: allowPrivileged,
+ PrivilegedSources: privilegedSources,
+ PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec,
+ })
+}
+
+// SetForTests sets the capability set for testing. This should only be called from tests.
+func SetForTests(c Capabilities) {
+ lock.Lock()
+ defer lock.Unlock()
+ capabilities = &c
+}
+
+// Returns a read-only copy of the system capabilities.
+func Get() Capabilities {
+ lock.Lock()
+ defer lock.Unlock()
+ // This check prevents clobbering of capabilities that might've been set via SetForTests
+ if capabilities == nil {
+ Initialize(Capabilities{
+ AllowPrivileged: false,
+ PrivilegedSources: PrivilegedSources{
+ HostNetworkSources: []string{},
+ HostPIDSources: []string{},
+ HostIPCSources: []string{},
+ },
+ })
+ }
+ return *capabilities
+}
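
The intended call pattern is: set the global capability set once at start-up with Setup (or Initialize), then read it anywhere with Get. A sketch with illustrative values follows; the pod-source names and flag values are examples, not a recommendation.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/capabilities"
)

func main() {
	// Setup wraps Initialize; only the first initialization in a binary takes effect.
	capabilities.Setup(
		true, // allowPrivileged
		capabilities.PrivilegedSources{
			HostNetworkSources: []string{"file", "api"},
		},
		0, // perConnectionBytesPerSec (illustrative value)
	)

	// Get returns a copy, initializing a restrictive default if nothing was set yet.
	c := capabilities.Get()
	fmt.Println(c.AllowPrivileged, c.PrivilegedSources.HostNetworkSources)
}
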
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go
new file mode 100644
index 0000000..b7fa14b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package capabilities manages system-level capabilities.
+package capabilities
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go
new file mode 100644
index 0000000..5a7f4a9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go
@@ -0,0 +1,613 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "k8s.io/kubernetes/pkg/util/sets"
+
+ "github.com/golang/glog"
+)
+
+// NewDeltaFIFO returns a Store which can be used to process changes to items.
+//
+// keyFunc is used to figure out what key an object should have. (It's
+// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
+//
+// 'compressor' may compress as many or as few items as it wants
+// (including returning an empty slice), but it should do what it
+// does quickly since it is called while the queue is locked.
+// 'compressor' may be nil if you don't want any delta compression.
+//
+// 'keyLister' is expected to return a list of keys that the consumer of
+// this queue "knows about". It is used to decide which items are missing
+// when Replace() is called; 'Deleted' deltas are produced for these items.
+// It may be nil if you don't need to detect all deletions.
+// TODO: consider merging keyLister with this object, tracking a list of
+// "known" keys when Pop() is called. Have to think about how that
+// affects error retrying.
+// TODO(lavalamp): I believe there is a possible race only when using an
+// external known object source that the above TODO would
+// fix.
+//
+// Also see the comment on DeltaFIFO.
+func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyListerGetter) *DeltaFIFO {
+ f := &DeltaFIFO{
+ items: map[string]Deltas{},
+ queue: []string{},
+ keyFunc: keyFunc,
+ deltaCompressor: compressor,
+ knownObjects: knownObjects,
+ }
+ f.cond.L = &f.lock
+ return f
+}
+
+// DeltaFIFO is like FIFO, but allows you to process deletes.
+//
+// DeltaFIFO is a producer-consumer queue, where a Reflector is
+// intended to be the producer, and the consumer is whatever calls
+// the Pop() method.
+//
+// DeltaFIFO solves this use case:
+// * You want to process every object change (delta) at most once.
+// * When you process an object, you want to see everything
+// that's happened to it since you last processed it.
+// * You want to process the deletion of objects.
+// * You might want to periodically reprocess objects.
+//
+// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
+// interface{} to satisfy the Store/Queue interfaces, but it
+// will always return an object of type Deltas.
+//
+// A note on threading: If you call Pop() in parallel from multiple
+// threads, you could end up with multiple threads processing slightly
+// different versions of the same object.
+//
+// A note on the KeyLister used by the DeltaFIFO: Its main purpose is
+// to list keys that are "known", for the purpose of figuring out which
+// items have been deleted when Replace() or Delete() are called. The deleted
+// object will be included in the DeleteFinalStateUnknown markers. These objects
+// could be stale.
+//
+// You may provide a function to compress deltas (e.g., represent a
+// series of Updates as a single Update).
+type DeltaFIFO struct {
+ // lock/cond protects access to 'items' and 'queue'.
+ lock sync.RWMutex
+ cond sync.Cond
+
+ // We depend on the property that items in the set are in
+ // the queue and vice versa, and that all Deltas in this
+ // map have at least one Delta.
+ items map[string]Deltas
+ queue []string
+
+ // populated is true once the first batch of items has been inserted by Replace(),
+ // or once Delete/Add/Update was called first.
+ populated bool
+ // initialPopulationCount is the number of items inserted by the first call of Replace()
+ initialPopulationCount int
+
+ // keyFunc is used to make the key used for queued item
+ // insertion and retrieval, and should be deterministic.
+ keyFunc KeyFunc
+
+ // deltaCompressor tells us how to combine two or more
+ // deltas. It may be nil.
+ deltaCompressor DeltaCompressor
+
+ // knownObjects list keys that are "known", for the
+ // purpose of figuring out which items have been deleted
+ // when Replace() or Delete() is called.
+ knownObjects KeyListerGetter
+}
+
+var (
+ _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+)
+
+var (
+ // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
+ // object with zero length is encountered (should be impossible,
+ // even if such an object is accidentally produced by a DeltaCompressor--
+ // but included for completeness).
+ ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
+)
+
+// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
+// DeletedFinalStateUnknown objects.
+func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
+ if d, ok := obj.(Deltas); ok {
+ if len(d) == 0 {
+ return "", KeyError{obj, ErrZeroLengthDeltasObject}
+ }
+ obj = d.Newest().Object
+ }
+ if d, ok := obj.(DeletedFinalStateUnknown); ok {
+ return d.Key, nil
+ }
+ return f.keyFunc(obj)
+}
+
+// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent call was made first,
+// or if the first batch of items inserted by Replace() has been popped.
+func (f *DeltaFIFO) HasSynced() bool {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.populated && f.initialPopulationCount == 0
+}
+
+// Add inserts an item, and puts it in the queue. The item is only enqueued
+// if it doesn't already exist in the set.
+func (f *DeltaFIFO) Add(obj interface{}) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.populated = true
+ return f.queueActionLocked(Added, obj)
+}
+
+// Update is just like Add, but makes an Updated Delta.
+func (f *DeltaFIFO) Update(obj interface{}) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.populated = true
+ return f.queueActionLocked(Updated, obj)
+}
+
+// Delete is just like Add, but makes a Deleted Delta. If the item does not
+// already exist, it will be ignored. (It may have already been deleted by a
+// Replace (re-list), for example.)
+func (f *DeltaFIFO) Delete(obj interface{}) error {
+ id, err := f.KeyOf(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.populated = true
+ if f.knownObjects == nil {
+ if _, exists := f.items[id]; !exists {
+ // Presumably, this was deleted when a relist happened.
+ // Don't provide a second report of the same deletion.
+ return nil
+ }
+ } else {
+ // We only want to skip the "deletion" action if the object doesn't
+ // exist in knownObjects and it doesn't have corresponding item in items.
+ // Note that even if there is a "deletion" action in items, we can ignore it,
+ // because it will be deduped automatically in "queueActionLocked"
+ _, exists, err := f.knownObjects.GetByKey(id)
+ _, itemsExist := f.items[id]
+ if err == nil && !exists && !itemsExist {
+ // Presumably, this was deleted when a relist happened.
+ // Don't provide a second report of the same deletion.
+ // TODO(lavalamp): This may be racy-- we aren't properly locked
+ // with knownObjects.
+ return nil
+ }
+ }
+
+ return f.queueActionLocked(Deleted, obj)
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+//
+// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
+// different from the Add/Update/Delete functions.
+func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
+ deltas, ok := obj.(Deltas)
+ if !ok {
+ return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
+ }
+ id, err := f.KeyOf(deltas.Newest().Object)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.addIfNotPresent(id, deltas)
+ return nil
+}
+
+// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
+// already holds the fifo lock.
+func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
+ f.populated = true
+ if _, exists := f.items[id]; exists {
+ return
+ }
+
+ f.queue = append(f.queue, id)
+ f.items[id] = deltas
+ f.cond.Broadcast()
+}
+
+// re-listing and watching can deliver the same update multiple times in any
+// order. This will combine the most recent two deltas if they are the same.
+func dedupDeltas(deltas Deltas) Deltas {
+ n := len(deltas)
+ if n < 2 {
+ return deltas
+ }
+ a := &deltas[n-1]
+ b := &deltas[n-2]
+ if out := isDup(a, b); out != nil {
+ d := append(Deltas{}, deltas[:n-2]...)
+ return append(d, *out)
+ }
+ return deltas
+}
+
+// If a & b represent the same event, returns the delta that ought to be kept.
+// Otherwise, returns nil.
+// TODO: is there anything other than deletions that need deduping?
+func isDup(a, b *Delta) *Delta {
+ if out := isDeletionDup(a, b); out != nil {
+ return out
+ }
+ // TODO: Detect other duplicate situations? Are there any?
+ return nil
+}
+
+// keep the one with the most information if both are deletions.
+func isDeletionDup(a, b *Delta) *Delta {
+ if b.Type != Deleted || a.Type != Deleted {
+ return nil
+ }
+ // Do more sophisticated checks, or is this sufficient?
+ if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
+ return a
+ }
+ return b
+}
+
+// willObjectBeDeletedLocked returns true only if the last delta for the
+// given object is Delete. Caller must lock first.
+func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
+ deltas := f.items[id]
+ return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
+}
+
+// queueActionLocked appends to the delta list for the object, calling
+// f.deltaCompressor if needed. Caller must lock first.
+func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
+ id, err := f.KeyOf(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+
+ // If object is supposed to be deleted (last event is Deleted),
+ // then we should ignore Sync events, because it would result in
+ // recreation of this object.
+ if actionType == Sync && f.willObjectBeDeletedLocked(id) {
+ return nil
+ }
+
+ newDeltas := append(f.items[id], Delta{actionType, obj})
+ newDeltas = dedupDeltas(newDeltas)
+ if f.deltaCompressor != nil {
+ newDeltas = f.deltaCompressor.Compress(newDeltas)
+ }
+
+ _, exists := f.items[id]
+ if len(newDeltas) > 0 {
+ if !exists {
+ f.queue = append(f.queue, id)
+ }
+ f.items[id] = newDeltas
+ f.cond.Broadcast()
+ } else if exists {
+ // The compression step removed all deltas, so
+ // we need to remove this from our map (extra items
+ // in the queue are ignored if they are not in the
+ // map).
+ delete(f.items, id)
+ }
+ return nil
+}
+
+// List returns a list of all the items; it returns the object
+// from the most recent Delta.
+// You should treat the items returned inside the deltas as immutable.
+func (f *DeltaFIFO) List() []interface{} {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return f.listLocked()
+}
+
+func (f *DeltaFIFO) listLocked() []interface{} {
+ list := make([]interface{}, 0, len(f.items))
+ for _, item := range f.items {
+ // Copy item's slice so operations on this slice (delta
+ // compression) won't interfere with the object we return.
+ item = copyDeltas(item)
+ list = append(list, item.Newest().Object)
+ }
+ return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *DeltaFIFO) ListKeys() []string {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ list := make([]string, 0, len(f.items))
+ for key := range f.items {
+ list = append(list, key)
+ }
+ return list
+}
+
+// Get returns the complete list of deltas for the requested item,
+// or sets exists=false.
+// You should treat the items returned inside the deltas as immutable.
+func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+ key, err := f.KeyOf(obj)
+ if err != nil {
+ return nil, false, KeyError{obj, err}
+ }
+ return f.GetByKey(key)
+}
+
+// GetByKey returns the complete list of deltas for the requested item,
+// setting exists=false if that list is empty.
+// You should treat the items returned inside the deltas as immutable.
+func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ d, exists := f.items[key]
+ if exists {
+ // Copy item's slice so operations on this slice (delta
+ // compression) won't interfere with the object we return.
+ d = copyDeltas(d)
+ }
+ return d, exists, nil
+}
+
+// Pop blocks until an item is added to the queue, and then returns it. If
+// multiple items are ready, they are returned in the order in which they were
+// added/updated. The item is removed from the queue (and the store) before it
+// is returned, so if you don't successfully process it, you need to add it back
+// with AddIfNotPresent().
+// The process function is called under lock, so it is safe to update data structures
+// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
+// may return an instance of ErrRequeue with a nested error to indicate the current
+// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
+//
+// Pop returns a 'Deltas', which has a complete list of all the things
+// that happened to the object (deltas) while it was sitting in the queue.
+func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ for {
+ for len(f.queue) == 0 {
+ f.cond.Wait()
+ }
+ id := f.queue[0]
+ f.queue = f.queue[1:]
+ item, ok := f.items[id]
+ if f.initialPopulationCount > 0 {
+ f.initialPopulationCount--
+ }
+ if !ok {
+ // Item may have been deleted subsequently.
+ continue
+ }
+ delete(f.items, id)
+ err := process(item)
+ if e, ok := err.(ErrRequeue); ok {
+ f.addIfNotPresent(id, item)
+ err = e.Err
+ }
+ // Don't need to copyDeltas here, because we're transferring
+ // ownership to the caller.
+ return item, err
+ }
+}
+
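
Pop and AddIfNotPresent together give the retry loop described above. The following consumer sketch is not part of this vendored file; it assumes the KeyFunc and PopProcessFunc types declared elsewhere in this cache package (a key function returning a string key, and a process callback receiving the popped Deltas), and uses a toy object where real callers would typically pass something like MetaNamespaceKeyFunc.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// widget is a toy object; the key function below just returns its name.
type widget struct{ name string }

func main() {
	fifo := cache.NewDeltaFIFO(
		func(obj interface{}) (string, error) { return obj.(widget).name, nil }, // keyFunc
		nil, // no delta compression
		nil, // no external "known objects" store
	)

	// Several actions against the same key accumulate into one Deltas list.
	fifo.Add(widget{name: "a"})
	fifo.Update(widget{name: "a"})
	fifo.Delete(widget{name: "a"})

	// Pop hands the consumer the Deltas for one key; returning an ErrRequeue
	// from the callback would put the item back via addIfNotPresent.
	if _, err := fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Println(d.Type, d.Object)
		}
		return nil
	}); err != nil {
		panic(err)
	}
}
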
+// Replace will delete the contents of 'f', using instead the given list.
+// 'f' takes ownership of the list; you should not reference the list again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the list, in no particular order.
+func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ keys := make(sets.String, len(list))
+
+ if !f.populated {
+ f.populated = true
+ f.initialPopulationCount = len(list)
+ }
+
+ for _, item := range list {
+ key, err := f.KeyOf(item)
+ if err != nil {
+ return KeyError{item, err}
+ }
+ keys.Insert(key)
+ if err := f.queueActionLocked(Sync, item); err != nil {
+ return fmt.Errorf("couldn't enqueue object: %v", err)
+ }
+ }
+
+ if f.knownObjects == nil {
+ // Do deletion detection against our own list.
+ for k, oldItem := range f.items {
+ if keys.Has(k) {
+ continue
+ }
+ var deletedObj interface{}
+ if n := oldItem.Newest(); n != nil {
+ deletedObj = n.Object
+ }
+ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // Detect deletions not already in the queue.
+ // TODO(lavalamp): This may be racy-- we aren't properly locked
+ // with knownObjects. Unproven.
+ knownKeys := f.knownObjects.ListKeys()
+ for _, k := range knownKeys {
+ if keys.Has(k) {
+ continue
+ }
+
+ deletedObj, exists, err := f.knownObjects.GetByKey(k)
+ if err != nil {
+ deletedObj = nil
+ glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+ } else if !exists {
+ deletedObj = nil
+ glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+ }
+ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Resync will send a sync event for each item
+func (f *DeltaFIFO) Resync() error {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ for _, k := range f.knownObjects.ListKeys() {
+ obj, exists, err := f.knownObjects.GetByKey(k)
+ if err != nil {
+ glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, k)
+ continue
+ } else if !exists {
+ glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", k)
+ continue
+ }
+
+ if err := f.queueActionLocked(Sync, obj); err != nil {
+ return fmt.Errorf("couldn't queue object: %v", err)
+ }
+ }
+ return nil
+}
+
+// A KeyListerGetter is anything that knows how to list its keys and look up by key.
+type KeyListerGetter interface {
+ KeyLister
+ KeyGetter
+}
+
+// A KeyLister is anything that knows how to list its keys.
+type KeyLister interface {
+ ListKeys() []string
+}
+
+// A KeyGetter is anything that knows how to get the value stored under a given key.
+type KeyGetter interface {
+ GetByKey(key string) (interface{}, bool, error)
+}
+
+// DeltaCompressor is an algorithm that removes redundant changes.
+type DeltaCompressor interface {
+ Compress(Deltas) Deltas
+}
+
+// DeltaCompressorFunc should remove redundant changes; but changes that
+// are redundant depend on one's desired semantics, so this is an
+// injectable function.
+//
+// DeltaCompressorFunc adapts a raw function to be a DeltaCompressor.
+type DeltaCompressorFunc func(Deltas) Deltas
+
+// Compress just calls dc.
+func (dc DeltaCompressorFunc) Compress(d Deltas) Deltas {
+ return dc(d)
+}
+
+// DeltaType is the type of a change (addition, deletion, etc)
+type DeltaType string
+
+const (
+ Added DeltaType = "Added"
+ Updated DeltaType = "Updated"
+ Deleted DeltaType = "Deleted"
+ // The other types are obvious. You'll get Sync deltas when:
+ // * A watch expires/errors out and a new list/watch cycle is started.
+ // * You've turned on periodic syncs.
+ // (Anything that triggers DeltaFIFO's Replace() method.)
+ Sync DeltaType = "Sync"
+)
+
+// Delta is the type stored by a DeltaFIFO. It tells you what change
+// happened, and the object's state after* that change.
+//
+// [*] Unless the change is a deletion, and then you'll get the final
+// state of the object before it was deleted.
+type Delta struct {
+ Type DeltaType
+ Object interface{}
+}
+
+// Deltas is a list of one or more 'Delta's to an individual object.
+// The oldest delta is at index 0, the newest delta is the last one.
+type Deltas []Delta
+
+// Oldest is a convenience function that returns the oldest delta, or
+// nil if there are no deltas.
+func (d Deltas) Oldest() *Delta {
+ if len(d) > 0 {
+ return &d[0]
+ }
+ return nil
+}
+
+// Newest is a convenience function that returns the newest delta, or
+// nil if there are no deltas.
+func (d Deltas) Newest() *Delta {
+ if n := len(d); n > 0 {
+ return &d[n-1]
+ }
+ return nil
+}
+
+// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
+// the objects in the slice. This allows Get/List to return an object that we
+// know won't be clobbered by a subsequent call to a delta compressor.
+func copyDeltas(d Deltas) Deltas {
+ d2 := make(Deltas, len(d))
+ copy(d2, d)
+ return d2
+}
+
+// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
+// an object was deleted but the watch deletion event was missed. In this
+// case we don't know the final "resting" state of the object, so there's
+// a chance the included `Obj` is stale.
+type DeletedFinalStateUnknown struct {
+ Key string
+ Obj interface{}
+}
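For orientation, here is a minimal consumer sketch for the DeltaFIFO above. It is not part of the vendored file; it assumes the NewDeltaFIFO constructor from earlier in this file (keyFunc, optional compressor, optional knownObjects) and the package's MetaNamespaceKeyFunc.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// No delta compressor and no knownObjects store; on Replace, deletion
	// detection then falls back to the queue's own contents.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, nil)

	fifo.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "p1", Namespace: "default"}})

	// Pop hands the accumulated Deltas for one key to the process func
	// while holding the queue lock.
	fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			switch o := d.Object.(type) {
			case cache.DeletedFinalStateUnknown:
				fmt.Println(d.Type, "tombstone for", o.Key)
			case *api.Pod:
				fmt.Println(d.Type, o.Name)
			}
		}
		return nil // returning cache.ErrRequeue{...} would re-queue the item
	})
}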
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go
new file mode 100644
index 0000000..4f593f0
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cache is a client-side caching mechanism. It is useful for
+// reducing the number of server calls you'd otherwise need to make.
+// Reflector watches a server and updates a Store. Two stores are provided;
+// one that simply caches objects (for example, to allow a scheduler to
+// list currently available nodes), and one that additionally acts as
+// a FIFO queue (for example, to allow a scheduler to process incoming
+// pods).
+package cache
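To make the doc comment concrete, a rough sketch of the two store flavors it mentions follows; it assumes the package's NewStore constructor (defined in store.go, outside this excerpt) alongside the FIFO defined later in this patch.

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// Plain object cache: keyed storage with Get/List, typically filled by a Reflector.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(&api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1"}})

	// Queue flavor: the same Store interface plus Pop for FIFO-style processing.
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queue.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "p1", Namespace: "default"}})
	queue.Pop(func(obj interface{}) error { return nil })
}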
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go
new file mode 100644
index 0000000..8c5c470
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "sync"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/util"
+)
+
+// ExpirationCache implements the store interface
+// 1. All entries are automatically time stamped on insert
+// a. The key is computed based off the original item/keyFunc
+// b. The value inserted under that key is the timestamped item
+// 2. Expiration happens lazily on read based on the expiration policy
+// a. No item can be inserted into the store while we're expiring
+// *any* item in the cache.
+// 3. Time-stamps are stripped off unexpired entries before return
+// Note that the ExpirationCache is inherently slower than a normal
+// threadSafeStore because it takes a write lock every time it checks if
+// an item has expired.
+type ExpirationCache struct {
+ cacheStorage ThreadSafeStore
+ keyFunc KeyFunc
+ clock util.Clock
+ expirationPolicy ExpirationPolicy
+ // expirationLock is a write lock used to guarantee that we don't clobber
+ // newly inserted objects because of a stale expiration timestamp comparison
+ expirationLock sync.Mutex
+}
+
+// ExpirationPolicy dictates when an object expires. Currently only abstracted out
+// so unittests don't rely on the system clock.
+type ExpirationPolicy interface {
+ IsExpired(obj *timestampedEntry) bool
+}
+
+// TTLPolicy implements a ttl based ExpirationPolicy.
+type TTLPolicy struct {
+ // >0: Expire entries with an age > ttl
+ // <=0: Don't expire any entry
+ Ttl time.Duration
+
+ // Clock used to calculate ttl expiration
+ Clock util.Clock
+}
+
+// IsExpired returns true if the given object is older than the ttl, or it can't
+// determine its age.
+func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
+ return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
+}
+
+// timestampedEntry is the only type allowed in an ExpirationCache.
+type timestampedEntry struct {
+ obj interface{}
+ timestamp time.Time
+}
+
+// getTimestampedEntry returns the timestampedEntry stored under the given key.
+func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
+ item, _ := c.cacheStorage.Get(key)
+ if tsEntry, ok := item.(*timestampedEntry); ok {
+ return tsEntry, true
+ }
+ return nil, false
+}
+
+// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
+// already expired. It holds a write lock across deletion.
+func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
+ // Prevent all inserts from the time we deem an item as "expired" to when we
+ // delete it, so an un-expired item doesn't sneak in under the same key, just
+ // before the Delete.
+ c.expirationLock.Lock()
+ defer c.expirationLock.Unlock()
+ timestampedItem, exists := c.getTimestampedEntry(key)
+ if !exists {
+ return nil, false
+ }
+ if c.expirationPolicy.IsExpired(timestampedItem) {
+ glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
+ c.cacheStorage.Delete(key)
+ return nil, false
+ }
+ return timestampedItem.obj, true
+}
+
+// GetByKey returns the item stored under the key, or sets exists=false.
+func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
+ obj, exists := c.getOrExpire(key)
+ return obj, exists, nil
+}
+
+// Get returns unexpired items. It purges the cache of expired items in the
+// process.
+func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return nil, false, KeyError{obj, err}
+ }
+ obj, exists := c.getOrExpire(key)
+ return obj, exists, nil
+}
+
+// List retrieves a list of unexpired items. It purges the cache of expired
+// items in the process.
+func (c *ExpirationCache) List() []interface{} {
+ items := c.cacheStorage.List()
+
+ list := make([]interface{}, 0, len(items))
+ for _, item := range items {
+ obj := item.(*timestampedEntry).obj
+ if key, err := c.keyFunc(obj); err != nil {
+ list = append(list, obj)
+ } else if obj, exists := c.getOrExpire(key); exists {
+ list = append(list, obj)
+ }
+ }
+ return list
+}
+
+// ListKeys returns a list of all keys in the expiration cache.
+func (c *ExpirationCache) ListKeys() []string {
+ return c.cacheStorage.ListKeys()
+}
+
+// Add timestamps an item and inserts it into the cache, overwriting entries
+// that might exist under the same key.
+func (c *ExpirationCache) Add(obj interface{}) error {
+ c.expirationLock.Lock()
+ defer c.expirationLock.Unlock()
+
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
+ return nil
+}
+
+// Update has not been implemented yet for lack of a use case, so this method
+// simply calls `Add`. This effectively refreshes the timestamp.
+func (c *ExpirationCache) Update(obj interface{}) error {
+ return c.Add(obj)
+}
+
+// Delete removes an item from the cache.
+func (c *ExpirationCache) Delete(obj interface{}) error {
+ c.expirationLock.Lock()
+ defer c.expirationLock.Unlock()
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ c.cacheStorage.Delete(key)
+ return nil
+}
+
+// Replace will convert all items in the given list to TimestampedEntries
+// before attempting the replace operation. The replace operation will
+// delete the contents of the ExpirationCache `c`.
+func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
+ c.expirationLock.Lock()
+ defer c.expirationLock.Unlock()
+ items := map[string]interface{}{}
+ ts := c.clock.Now()
+ for _, item := range list {
+ key, err := c.keyFunc(item)
+ if err != nil {
+ return KeyError{item, err}
+ }
+ items[key] = &timestampedEntry{item, ts}
+ }
+ c.cacheStorage.Replace(items, resourceVersion)
+ return nil
+}
+
+// Resync will touch all objects to put them into the processing queue
+func (c *ExpirationCache) Resync() error {
+ return c.cacheStorage.Resync()
+}
+
+// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
+func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
+ return &ExpirationCache{
+ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
+ keyFunc: keyFunc,
+ clock: util.RealClock{},
+ expirationPolicy: &TTLPolicy{ttl, util.RealClock{}},
+ }
+}
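A short usage sketch for NewTTLStore follows; the 30-second TTL and the Endpoints object are arbitrary, and expiration happens lazily on the read path as described above.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// Entries older than 30s are dropped lazily on the next read.
	store := cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 30*time.Second)

	store.Add(&api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "svc-a", Namespace: "default"}})

	if obj, exists, err := store.GetByKey("default/svc-a"); err == nil && exists {
		fmt.Println("still fresh:", obj.(*api.Endpoints).Name)
	}
}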
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go
new file mode 100644
index 0000000..eb1d535
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+type fakeThreadSafeMap struct {
+ ThreadSafeStore
+ deletedKeys chan<- string
+}
+
+func (c *fakeThreadSafeMap) Delete(key string) {
+ if c.deletedKeys != nil {
+ c.ThreadSafeStore.Delete(key)
+ c.deletedKeys <- key
+ }
+}
+
+type FakeExpirationPolicy struct {
+ NeverExpire sets.String
+ RetrieveKeyFunc KeyFunc
+}
+
+func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
+ key, _ := p.RetrieveKeyFunc(obj)
+ return !p.NeverExpire.Has(key)
+}
+
+func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock util.Clock) Store {
+ cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
+ return &ExpirationCache{
+ cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys},
+ keyFunc: keyFunc,
+ clock: cacheClock,
+ expirationPolicy: expirationPolicy,
+ }
+}
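Because timestampedEntry is unexported, these fakes are only usable from inside package cache; below is a sketch of what an in-package test might look like. The FakeExpirationPolicy keys off names rather than the clock, so passing util.RealClock{} is fine here; the test name and objects are illustrative.

package cache

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
)

func TestFakeExpirationSketch(t *testing.T) {
	deleted := make(chan string, 1)
	policy := &FakeExpirationPolicy{
		NeverExpire: sets.NewString("default/keep-me"),
		RetrieveKeyFunc: func(obj interface{}) (string, error) {
			// The policy is handed the wrapped entry, not the raw object.
			return MetaNamespaceKeyFunc(obj.(*timestampedEntry).obj)
		},
	}
	store := NewFakeExpirationStore(MetaNamespaceKeyFunc, deleted, policy, util.RealClock{})

	store.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "keep-me", Namespace: "default"}})
	store.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "drop-me", Namespace: "default"}})

	if _, exists, _ := store.GetByKey("default/drop-me"); exists {
		t.Errorf("expected drop-me to be treated as expired")
	}
	if key := <-deleted; key != "default/drop-me" {
		t.Errorf("unexpected deleted key %q", key)
	}
}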
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go
new file mode 100644
index 0000000..8d71c24
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// FakeCustomStore lets you define custom functions for store operations
+type FakeCustomStore struct {
+ AddFunc func(obj interface{}) error
+ UpdateFunc func(obj interface{}) error
+ DeleteFunc func(obj interface{}) error
+ ListFunc func() []interface{}
+ ListKeysFunc func() []string
+ GetFunc func(obj interface{}) (item interface{}, exists bool, err error)
+ GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
+ ReplaceFunc func(list []interface{}, resourceVersion string) error
+ ResyncFunc func() error
+}
+
+// Add calls the custom Add function if defined
+func (f *FakeCustomStore) Add(obj interface{}) error {
+ if f.AddFunc != nil {
+ return f.AddFunc(obj)
+ }
+ return nil
+}
+
+// Update calls the custom Update function if defined
+func (f *FakeCustomStore) Update(obj interface{}) error {
+ if f.UpdateFunc != nil {
+ return f.UpdateFunc(obj)
+ }
+ return nil
+}
+
+// Delete calls the custom Delete function if defined
+func (f *FakeCustomStore) Delete(obj interface{}) error {
+ if f.DeleteFunc != nil {
+ return f.DeleteFunc(obj)
+ }
+ return nil
+}
+
+// List calls the custom List function if defined
+func (f *FakeCustomStore) List() []interface{} {
+ if f.ListFunc != nil {
+ return f.ListFunc()
+ }
+ return nil
+}
+
+// ListKeys calls the custom ListKeys function if defined
+func (f *FakeCustomStore) ListKeys() []string {
+ if f.ListKeysFunc != nil {
+ return f.ListKeysFunc()
+ }
+ return nil
+}
+
+// Get calls the custom Get function if defined
+func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
+ if f.GetFunc != nil {
+ return f.GetFunc(obj)
+ }
+ return nil, false, nil
+}
+
+// GetByKey calls the custom GetByKey function if defined
+func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) {
+ if f.GetByKeyFunc != nil {
+ return f.GetByKeyFunc(key)
+ }
+ return nil, false, nil
+}
+
+// Replace calls the custom Replace function if defined
+func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error {
+ if f.ReplaceFunc != nil {
+ return f.ReplaceFunc(list, resourceVersion)
+ }
+ return nil
+}
+
+// Resync calls the custom Resync function if defined
+func (f *FakeCustomStore) Resync() error {
+ if f.ResyncFunc != nil {
+ return f.ResyncFunc()
+ }
+ return nil
+}
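A brief sketch of how a test might stub only the store calls it cares about using FakeCustomStore; the key and return values are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	store := &cache.FakeCustomStore{
		// Only the hooks you set are invoked; every other method is a no-op.
		GetByKeyFunc: func(key string) (interface{}, bool, error) {
			if key == "default/known" {
				return "stubbed object", true, nil
			}
			return nil, false, nil
		},
	}

	if obj, exists, _ := store.GetByKey("default/known"); exists {
		fmt.Println("got:", obj)
	}
	_ = store.Add("ignored") // AddFunc is nil, so this returns nil without doing anything
}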
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go
new file mode 100644
index 0000000..a6d5e0a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go
@@ -0,0 +1,321 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "sync"
+
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+// PopProcessFunc is passed to Pop() method of Queue interface.
+// It is supposed to process the element popped from the queue.
+type PopProcessFunc func(interface{}) error
+
+// ErrRequeue may be returned by a PopProcessFunc to safely requeue
+// the current item. The value of Err will be returned from Pop.
+type ErrRequeue struct {
+ // Err is returned by the Pop function
+ Err error
+}
+
+func (e ErrRequeue) Error() string {
+ if e.Err == nil {
+ return "the popped item should be requeued without returning an error"
+ }
+ return e.Err.Error()
+}
+
+// Queue is exactly like a Store, but has a Pop() method too.
+type Queue interface {
+ Store
+
+ // Pop blocks until it has something to process.
+ // It returns the object that was processed and the result of processing.
+ // The PopProcessFunc may return an ErrRequeue{...} to indicate the item
+ // should be requeued before releasing the lock on the queue.
+ Pop(PopProcessFunc) (interface{}, error)
+
+ // AddIfNotPresent adds a value previously
+ // returned by Pop back into the queue as long
+ // as nothing else (presumably more recent)
+ // has since been added.
+ AddIfNotPresent(interface{}) error
+
+ // HasSynced returns true once the first batch of items has been popped
+ HasSynced() bool
+}
+
+// Helper function for popping from Queue.
+// WARNING: Do NOT use this function in non-test code to avoid races
+// unless you really really really really know what you are doing.
+func Pop(queue Queue) interface{} {
+ var result interface{}
+ queue.Pop(func(obj interface{}) error {
+ result = obj
+ return nil
+ })
+ return result
+}
+
+// FIFO receives adds and updates from a Reflector, and puts them in a queue for
+// FIFO order processing. If multiple adds/updates of a single item happen while
+// an item is in the queue before it has been processed, it will only be
+// processed once, and when it is processed, the most recent version will be
+// processed. This can't be done with a channel.
+//
+// FIFO solves this use case:
+// * You want to process every object (exactly) once.
+// * You want to process the most recent version of the object when you process it.
+// * You do not want to process deleted objects, they should be removed from the queue.
+// * You do not want to periodically reprocess objects.
+// Compare with DeltaFIFO for other use cases.
+type FIFO struct {
+ lock sync.RWMutex
+ cond sync.Cond
+ // We depend on the property that items in the set are in the queue and vice versa.
+ items map[string]interface{}
+ queue []string
+
+ // populated is true if the first batch of items inserted by Replace() has been populated
+ // or Delete/Add/Update was called first.
+ populated bool
+ // initialPopulationCount is the number of items inserted by the first call of Replace()
+ initialPopulationCount int
+
+ // keyFunc is used to make the key used for queued item insertion and retrieval, and
+ // should be deterministic.
+ keyFunc KeyFunc
+}
+
+var (
+ _ = Queue(&FIFO{}) // FIFO is a Queue
+)
+
+// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent call was made first,
+// or if the first batch of items inserted by the first Replace() has been popped.
+func (f *FIFO) HasSynced() bool {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.populated && f.initialPopulationCount == 0
+}
+
+// Add inserts an item, and puts it in the queue. The item is only enqueued
+// if it doesn't already exist in the set.
+func (f *FIFO) Add(obj interface{}) error {
+ id, err := f.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.populated = true
+ if _, exists := f.items[id]; !exists {
+ f.queue = append(f.queue, id)
+ }
+ f.items[id] = obj
+ f.cond.Broadcast()
+ return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (f *FIFO) AddIfNotPresent(obj interface{}) error {
+ id, err := f.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.addIfNotPresent(id, obj)
+ return nil
+}
+
+// addIfNotPresent assumes the fifo lock is already held and adds the provided
+// item to the queue under id if it does not already exist.
+func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
+ f.populated = true
+ if _, exists := f.items[id]; exists {
+ return
+ }
+
+ f.queue = append(f.queue, id)
+ f.items[id] = obj
+ f.cond.Broadcast()
+}
+
+// Update is the same as Add in this implementation.
+func (f *FIFO) Update(obj interface{}) error {
+ return f.Add(obj)
+}
+
+// Delete removes an item. It doesn't add it to the queue, because
+// this implementation assumes the consumer only cares about the objects,
+// not the order in which they were created/added.
+func (f *FIFO) Delete(obj interface{}) error {
+ id, err := f.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.populated = true
+ delete(f.items, id)
+ return err
+}
+
+// List returns a list of all the items.
+func (f *FIFO) List() []interface{} {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ list := make([]interface{}, 0, len(f.items))
+ for _, item := range f.items {
+ list = append(list, item)
+ }
+ return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *FIFO) ListKeys() []string {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ list := make([]string, 0, len(f.items))
+ for key := range f.items {
+ list = append(list, key)
+ }
+ return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+ key, err := f.keyFunc(obj)
+ if err != nil {
+ return nil, false, KeyError{obj, err}
+ }
+ return f.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ item, exists = f.items[key]
+ return item, exists, nil
+}
+
+// Pop waits until an item is ready and processes it. If multiple items are
+// ready, they are returned in the order in which they were added/updated.
+// The item is removed from the queue (and the store) before it is processed,
+// so if you don't successfully process it, it should be added back with
+// AddIfNotPresent(). The process function is called under the lock, so it is safe
+// to update data structures in it that need to be in sync with the queue.
+func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ for {
+ for len(f.queue) == 0 {
+ f.cond.Wait()
+ }
+ id := f.queue[0]
+ f.queue = f.queue[1:]
+ if f.initialPopulationCount > 0 {
+ f.initialPopulationCount--
+ }
+ item, ok := f.items[id]
+ if !ok {
+ // Item may have been deleted subsequently.
+ continue
+ }
+ delete(f.items, id)
+ err := process(item)
+ if e, ok := err.(ErrRequeue); ok {
+ f.addIfNotPresent(id, item)
+ err = e.Err
+ }
+ return item, err
+ }
+}
+
+// Replace will delete the contents of 'f', using instead the given map.
+// 'f' takes ownership of the map, you should not reference the map again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the map, in no particular order.
+func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
+ items := map[string]interface{}{}
+ for _, item := range list {
+ key, err := f.keyFunc(item)
+ if err != nil {
+ return KeyError{item, err}
+ }
+ items[key] = item
+ }
+
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.populated {
+ f.populated = true
+ f.initialPopulationCount = len(items)
+ }
+
+ f.items = items
+ f.queue = f.queue[:0]
+ for id := range items {
+ f.queue = append(f.queue, id)
+ }
+ if len(f.queue) > 0 {
+ f.cond.Broadcast()
+ }
+ return nil
+}
+
+// Resync will touch all objects to put them into the processing queue
+func (f *FIFO) Resync() error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ inQueue := sets.NewString()
+ for _, id := range f.queue {
+ inQueue.Insert(id)
+ }
+ for id := range f.items {
+ if !inQueue.Has(id) {
+ f.queue = append(f.queue, id)
+ }
+ }
+ if len(f.queue) > 0 {
+ f.cond.Broadcast()
+ }
+ return nil
+}
+
+// NewFIFO returns a Store which can be used to queue up items to
+// process.
+func NewFIFO(keyFunc KeyFunc) *FIFO {
+ f := &FIFO{
+ items: map[string]interface{}{},
+ queue: []string{},
+ keyFunc: keyFunc,
+ }
+ f.cond.L = &f.lock
+ return f
+}
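A minimal producer/consumer sketch for the FIFO above, showing how multiple updates to one key coalesce so only the newest version is popped; the pod objects are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	f := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

	// Both calls target the key "default/p1", so only the second version survives.
	f.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "p1", Namespace: "default", ResourceVersion: "1"}})
	f.Update(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "p1", Namespace: "default", ResourceVersion: "2"}})

	f.Pop(func(obj interface{}) error {
		pod := obj.(*api.Pod)
		fmt.Println("processing", pod.Name, "rv", pod.ResourceVersion)
		// Returning cache.ErrRequeue{Err: nil} here would put the item back instead.
		return nil
	})
}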
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/index.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/index.go
new file mode 100644
index 0000000..4379880
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/index.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+// Indexer is a storage interface that lets you list objects using multiple indexing functions
+type Indexer interface {
+ Store
+ // Retrieve list of objects that match on the named indexing function
+ Index(indexName string, obj interface{}) ([]interface{}, error)
+ // ListIndexFuncValues returns the list of generated values of an Index func
+ ListIndexFuncValues(indexName string) []string
+ // ByIndex lists object that match on the named indexing function with the exact key
+ ByIndex(indexName, indexKey string) ([]interface{}, error)
+ // GetIndexers returns the indexers
+ GetIndexers() Indexers
+
+ // AddIndexers adds more indexers to this store. If you call this after you already have data
+ // in the store, the results are undefined.
+ AddIndexers(newIndexers Indexers) error
+}
+
+// IndexFunc knows how to provide an indexed value for an object.
+type IndexFunc func(obj interface{}) ([]string, error)
+
+// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
+// unique values for every object. This conversion can create errors when more than one key is found. You
+// should prefer to make proper key and index functions.
+func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
+ return func(obj interface{}) (string, error) {
+ indexKeys, err := indexFunc(obj)
+ if err != nil {
+ return "", err
+ }
+ if len(indexKeys) > 1 {
+ return "", fmt.Errorf("too many keys: %v", indexKeys)
+ }
+ return indexKeys[0], nil
+ }
+}
+
+const (
+ NamespaceIndex string = "namespace"
+)
+
+// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
+func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
+ meta, err := meta.Accessor(obj)
+ if err != nil {
+ return []string{""}, fmt.Errorf("object has no meta: %v", err)
+ }
+ return []string{meta.GetNamespace()}, nil
+}
+
+// Index maps the indexed value to a set of keys in the store that match on that value
+type Index map[string]sets.String
+
+// Indexers maps a name to a IndexFunc
+type Indexers map[string]IndexFunc
+
+// Indices maps a name to an Index
+type Indices map[string]Index
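To illustrate the types above, a short sketch that builds a namespace-keyed Indexer and queries it by index; NewIndexer is defined in the package's store.go, outside this excerpt.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// Primary key is namespace/name; the extra index groups objects by namespace.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "kube-system"}})
	indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "default"}})

	// ByIndex looks up by the computed index value, here a namespace string.
	if objs, err := indexer.ByIndex(cache.NamespaceIndex, "default"); err == nil {
		for _, o := range objs {
			fmt.Println(o.(*api.Pod).Name)
		}
	}
}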
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go
new file mode 100644
index 0000000..29e5859
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go
@@ -0,0 +1,672 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apis/apps"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/labels"
+)
+
+// TODO: generate these classes and methods for all resources of interest using
+// a script. Can use "go generate" once 1.4 is supported by all users.
+
+// StoreToPodLister makes a Store have the List method of the client.PodInterface
+// The Store must contain (only) Pods.
+//
+// Example:
+// s := cache.NewStore()
+// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"}
+// r := cache.NewReflector(lw, &api.Pod{}, s).Run()
+// l := StoreToPodLister{s}
+// l.List()
+type StoreToPodLister struct {
+ Indexer
+}
+
+// Please note that selector is filtering among the pods that have gotten into
+// the store; there may have been some filtering that already happened before
+// that.
+//
+// TODO: converge on the interface in pkg/client.
+func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err error) {
+ // TODO: it'd be great to just call
+ // s.Pods(api.NamespaceAll).List(selector), however then we'd have to
+ // remake the list.Items as a []*api.Pod. So leave this separate for
+ // now.
+ for _, m := range s.Indexer.List() {
+ pod := m.(*api.Pod)
+ if selector.Matches(labels.Set(pod.Labels)) {
+ pods = append(pods, pod)
+ }
+ }
+ return pods, nil
+}
+
+// Pods is taking baby steps to be more like the api in pkg/client
+func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer {
+ return storePodsNamespacer{s.Indexer, namespace}
+}
+
+type storePodsNamespacer struct {
+ indexer Indexer
+ namespace string
+}
+
+// Please note that selector is filtering among the pods that have gotten into
+// the store; there may have been some filtering that already happened before
+// that.
+func (s storePodsNamespacer) List(selector labels.Selector) (api.PodList, error) {
+ pods := api.PodList{}
+
+ if s.namespace == api.NamespaceAll {
+ for _, m := range s.indexer.List() {
+ pod := m.(*api.Pod)
+ if selector.Matches(labels.Set(pod.Labels)) {
+ pods.Items = append(pods.Items, *pod)
+ }
+ }
+ return pods, nil
+ }
+
+ key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}}
+ items, err := s.indexer.Index(NamespaceIndex, key)
+ if err != nil {
+ // Ignore error; do slow search without index.
+ glog.Warningf("can not retrieve list of objects using index : %v", err)
+ for _, m := range s.indexer.List() {
+ pod := m.(*api.Pod)
+ if s.namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) {
+ pods.Items = append(pods.Items, *pod)
+ }
+ }
+ return pods, nil
+ }
+ for _, m := range items {
+ pod := m.(*api.Pod)
+ if selector.Matches(labels.Set(pod.Labels)) {
+ pods.Items = append(pods.Items, *pod)
+ }
+ }
+ return pods, nil
+}
+
+// Exists returns true if a pod matching the namespace/name of the given pod exists in the store.
+func (s *StoreToPodLister) Exists(pod *api.Pod) (bool, error) {
+ _, exists, err := s.Indexer.Get(pod)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// NodeConditionPredicate is a function that indicates whether the given node's conditions meet
+// some set of criteria defined by the function.
+type NodeConditionPredicate func(node *api.Node) bool
+
+// StoreToNodeLister makes a Store have the List method of the client.NodeInterface
+// The Store must contain (only) Nodes.
+type StoreToNodeLister struct {
+ Store
+}
+
+func (s *StoreToNodeLister) List() (machines api.NodeList, err error) {
+ for _, m := range s.Store.List() {
+ machines.Items = append(machines.Items, *(m.(*api.Node)))
+ }
+ return machines, nil
+}
+
+// NodeCondition returns a storeToNodeConditionLister
+func (s *StoreToNodeLister) NodeCondition(predicate NodeConditionPredicate) storeToNodeConditionLister {
+ // TODO: Move this filtering server side. Currently our selectors don't facilitate searching through a list so we
+ // have the reflector filter out the Unschedulable field and sift through node conditions in the lister.
+ return storeToNodeConditionLister{s.Store, predicate}
+}
+
+// storeToNodeConditionLister filters and returns nodes matching the given type and status from the store.
+type storeToNodeConditionLister struct {
+ store Store
+ predicate NodeConditionPredicate
+}
+
+// List returns a list of nodes that match the conditions defined by the predicate functions in the storeToNodeConditionLister.
+func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) {
+ for _, m := range s.store.List() {
+ node := m.(*api.Node)
+ if s.predicate(node) {
+ nodes.Items = append(nodes.Items, *node)
+ } else {
+ glog.V(5).Infof("Node %s matches none of the conditions", node.Name)
+ }
+ }
+ return
+}
+
+// StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers.
+type StoreToReplicationControllerLister struct {
+ Indexer
+}
+
+// Exists checks if the given rc exists in the store.
+func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationController) (bool, error) {
+ _, exists, err := s.Indexer.Get(controller)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// List lists all controllers in the store.
+// TODO: converge on the interface in pkg/client
+func (s *StoreToReplicationControllerLister) List() (controllers []api.ReplicationController, err error) {
+ for _, c := range s.Indexer.List() {
+ controllers = append(controllers, *(c.(*api.ReplicationController)))
+ }
+ return controllers, nil
+}
+
+func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer {
+ return storeReplicationControllersNamespacer{s.Indexer, namespace}
+}
+
+type storeReplicationControllersNamespacer struct {
+ indexer Indexer
+ namespace string
+}
+
+func (s storeReplicationControllersNamespacer) List(selector labels.Selector) ([]api.ReplicationController, error) {
+ controllers := []api.ReplicationController{}
+
+ if s.namespace == api.NamespaceAll {
+ for _, m := range s.indexer.List() {
+ rc := *(m.(*api.ReplicationController))
+ if selector.Matches(labels.Set(rc.Labels)) {
+ controllers = append(controllers, rc)
+ }
+ }
+ return controllers, nil
+ }
+
+ key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}}
+ items, err := s.indexer.Index(NamespaceIndex, key)
+ if err != nil {
+ // Ignore error; do slow search without index.
+ glog.Warningf("can not retrieve list of objects using index : %v", err)
+ for _, m := range s.indexer.List() {
+ rc := *(m.(*api.ReplicationController))
+ if s.namespace == rc.Namespace && selector.Matches(labels.Set(rc.Labels)) {
+ controllers = append(controllers, rc)
+ }
+ }
+ return controllers, nil
+ }
+ for _, m := range items {
+ rc := *(m.(*api.ReplicationController))
+ if selector.Matches(labels.Set(rc.Labels)) {
+ controllers = append(controllers, rc)
+ }
+ }
+ return controllers, nil
+}
+
+// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found.
+func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) {
+ var selector labels.Selector
+ var rc api.ReplicationController
+
+ if len(pod.Labels) == 0 {
+ err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name)
+ return
+ }
+
+ key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}}
+ items, err := s.Indexer.Index(NamespaceIndex, key)
+ if err != nil {
+ return
+ }
+
+ for _, m := range items {
+ rc = *m.(*api.ReplicationController)
+ labelSet := labels.Set(rc.Spec.Selector)
+ selector = labels.Set(rc.Spec.Selector).AsSelector()
+
+ // If an rc with a nil or empty selector creeps in, it should match nothing, not everything.
+ if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(pod.Labels)) {
+ continue
+ }
+ controllers = append(controllers, rc)
+ }
+ if len(controllers) == 0 {
+ err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+ return
+}
+
+// StoreToDeploymentLister gives a store List and Exists methods. The store must contain only Deployments.
+type StoreToDeploymentLister struct {
+ Store
+}
+
+// Exists checks if the given deployment exists in the store.
+func (s *StoreToDeploymentLister) Exists(deployment *extensions.Deployment) (bool, error) {
+ _, exists, err := s.Store.Get(deployment)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// List lists all deployments in the store.
+// TODO: converge on the interface in pkg/client
+func (s *StoreToDeploymentLister) List() (deployments []extensions.Deployment, err error) {
+ for _, c := range s.Store.List() {
+ deployments = append(deployments, *(c.(*extensions.Deployment)))
+ }
+ return deployments, nil
+}
+
+// GetDeploymentsForReplicaSet returns a list of deployments managing a replica set. Returns an error only if no matching deployments are found.
+func (s *StoreToDeploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) (deployments []extensions.Deployment, err error) {
+ var d extensions.Deployment
+
+ if len(rs.Labels) == 0 {
+ err = fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
+ return
+ }
+
+ // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
+ for _, m := range s.Store.List() {
+ d = *m.(*extensions.Deployment)
+ if d.Namespace != rs.Namespace {
+ continue
+ }
+
+ selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector)
+ if err != nil {
+ return nil, fmt.Errorf("invalid label selector: %v", err)
+ }
+ // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
+ if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
+ continue
+ }
+ deployments = append(deployments, d)
+ }
+ if len(deployments) == 0 {
+ err = fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
+ }
+ return
+}
+
+// StoreToReplicaSetLister gives a store List and Exists methods. The store must contain only ReplicaSets.
+type StoreToReplicaSetLister struct {
+ Store
+}
+
+// Exists checks if the given ReplicaSet exists in the store.
+func (s *StoreToReplicaSetLister) Exists(rs *extensions.ReplicaSet) (bool, error) {
+ _, exists, err := s.Store.Get(rs)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// List lists all ReplicaSets in the store.
+// TODO: converge on the interface in pkg/client
+func (s *StoreToReplicaSetLister) List() (rss []extensions.ReplicaSet, err error) {
+ for _, rs := range s.Store.List() {
+ rss = append(rss, *(rs.(*extensions.ReplicaSet)))
+ }
+ return rss, nil
+}
+
+type storeReplicaSetsNamespacer struct {
+ store Store
+ namespace string
+}
+
+func (s storeReplicaSetsNamespacer) List(selector labels.Selector) (rss []extensions.ReplicaSet, err error) {
+ for _, c := range s.store.List() {
+ rs := *(c.(*extensions.ReplicaSet))
+ if s.namespace == api.NamespaceAll || s.namespace == rs.Namespace {
+ if selector.Matches(labels.Set(rs.Labels)) {
+ rss = append(rss, rs)
+ }
+ }
+ }
+ return
+}
+
+func (s *StoreToReplicaSetLister) ReplicaSets(namespace string) storeReplicaSetsNamespacer {
+ return storeReplicaSetsNamespacer{s.Store, namespace}
+}
+
+// GetPodReplicaSets returns a list of ReplicaSets managing a pod. Returns an error only if no matching ReplicaSets are found.
+func (s *StoreToReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []extensions.ReplicaSet, err error) {
+ var selector labels.Selector
+ var rs extensions.ReplicaSet
+
+ if len(pod.Labels) == 0 {
+ err = fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name)
+ return
+ }
+
+ for _, m := range s.Store.List() {
+ rs = *m.(*extensions.ReplicaSet)
+ if rs.Namespace != pod.Namespace {
+ continue
+ }
+ selector, err = unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
+ if err != nil {
+ err = fmt.Errorf("invalid selector: %v", err)
+ return
+ }
+
+ // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
+ continue
+ }
+ rss = append(rss, rs)
+ }
+ if len(rss) == 0 {
+ err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+ return
+}
+
+// StoreToDaemonSetLister gives a store List and Exists methods. The store must contain only DaemonSets.
+type StoreToDaemonSetLister struct {
+ Store
+}
+
+// Exists checks if the given daemon set exists in the store.
+func (s *StoreToDaemonSetLister) Exists(ds *extensions.DaemonSet) (bool, error) {
+ _, exists, err := s.Store.Get(ds)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// List lists all daemon sets in the store.
+// TODO: converge on the interface in pkg/client
+func (s *StoreToDaemonSetLister) List() (dss extensions.DaemonSetList, err error) {
+ for _, c := range s.Store.List() {
+ dss.Items = append(dss.Items, *(c.(*extensions.DaemonSet)))
+ }
+ return dss, nil
+}
+
+// GetPodDaemonSets returns a list of daemon sets managing a pod.
+// Returns an error if and only if no matching daemon sets are found.
+func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) {
+ var selector labels.Selector
+ var daemonSet extensions.DaemonSet
+
+ if len(pod.Labels) == 0 {
+ err = fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name)
+ return
+ }
+
+ for _, m := range s.Store.List() {
+ daemonSet = *m.(*extensions.DaemonSet)
+ if daemonSet.Namespace != pod.Namespace {
+ continue
+ }
+ selector, err = unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector)
+ if err != nil {
+ // this should not happen if the DaemonSet passed validation
+ return nil, err
+ }
+
+ // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
+ continue
+ }
+ daemonSets = append(daemonSets, daemonSet)
+ }
+ if len(daemonSets) == 0 {
+ err = fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+ return
+}
+
+// StoreToServiceLister makes a Store that has the List method of the client.ServiceInterface
+// The Store must contain (only) Services.
+type StoreToServiceLister struct {
+ Store
+}
+
+func (s *StoreToServiceLister) List() (services api.ServiceList, err error) {
+ for _, m := range s.Store.List() {
+ services.Items = append(services.Items, *(m.(*api.Service)))
+ }
+ return services, nil
+}
+
+// TODO: Move this back to scheduler as a helper function that takes a Store,
+// rather than a method of StoreToServiceLister.
+func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) {
+ var selector labels.Selector
+ var service api.Service
+
+ for _, m := range s.Store.List() {
+ service = *m.(*api.Service)
+ // consider only services that are in the same namespace as the pod
+ if service.Namespace != pod.Namespace {
+ continue
+ }
+ if service.Spec.Selector == nil {
+ // services with nil selectors match nothing, not everything.
+ continue
+ }
+ selector = labels.Set(service.Spec.Selector).AsSelector()
+ if selector.Matches(labels.Set(pod.Labels)) {
+ services = append(services, service)
+ }
+ }
+ if len(services) == 0 {
+ err = fmt.Errorf("could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+
+ return
+}
+
+// StoreToEndpointsLister makes a Store that lists endpoints.
+type StoreToEndpointsLister struct {
+ Store
+}
+
+// List lists all endpoints in the store.
+func (s *StoreToEndpointsLister) List() (services api.EndpointsList, err error) {
+ for _, m := range s.Store.List() {
+ services.Items = append(services.Items, *(m.(*api.Endpoints)))
+ }
+ return services, nil
+}
+
+// GetServiceEndpoints returns the endpoints of a service, matched on service name.
+func (s *StoreToEndpointsLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) {
+ for _, m := range s.Store.List() {
+ ep = *m.(*api.Endpoints)
+ if svc.Name == ep.Name && svc.Namespace == ep.Namespace {
+ return ep, nil
+ }
+ }
+ err = fmt.Errorf("could not find endpoints for service: %v", svc.Name)
+ return
+}
+
+// StoreToJobLister gives a store List and Exists methods. The store must contain only Jobs.
+type StoreToJobLister struct {
+ Store
+}
+
+// Exists checks if the given job exists in the store.
+func (s *StoreToJobLister) Exists(job *batch.Job) (bool, error) {
+ _, exists, err := s.Store.Get(job)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// List lists all jobs in the store.
+func (s *StoreToJobLister) List() (jobs batch.JobList, err error) {
+ for _, c := range s.Store.List() {
+ jobs.Items = append(jobs.Items, *(c.(*batch.Job)))
+ }
+ return jobs, nil
+}
+
+// GetPodJobs returns a list of jobs managing a pod. Returns an error only if no matching jobs are found.
+func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []batch.Job, err error) {
+ var selector labels.Selector
+ var job batch.Job
+
+ if len(pod.Labels) == 0 {
+ err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name)
+ return
+ }
+
+ for _, m := range s.Store.List() {
+ job = *m.(*batch.Job)
+ if job.Namespace != pod.Namespace {
+ continue
+ }
+
+ selector, _ = unversioned.LabelSelectorAsSelector(job.Spec.Selector)
+ if !selector.Matches(labels.Set(pod.Labels)) {
+ continue
+ }
+ jobs = append(jobs, job)
+ }
+ if len(jobs) == 0 {
+ err = fmt.Errorf("could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+ return
+}
+
+// Typed wrapper around a store of PersistentVolumes
+type StoreToPVFetcher struct {
+ Store
+}
+
+// GetPersistentVolumeInfo returns cached data for the PersistentVolume 'id'.
+func (s *StoreToPVFetcher) GetPersistentVolumeInfo(id string) (*api.PersistentVolume, error) {
+ o, exists, err := s.Get(&api.PersistentVolume{ObjectMeta: api.ObjectMeta{Name: id}})
+
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving PersistentVolume '%v' from cache: %v", id, err)
+ }
+
+ if !exists {
+ return nil, fmt.Errorf("PersistentVolume '%v' not found", id)
+ }
+
+ return o.(*api.PersistentVolume), nil
+}
+
+// Typed wrapper around a store of PersistentVolumeClaims
+type StoreToPVCFetcher struct {
+ Store
+}
+
+// GetPersistentVolumeClaimInfo returns cached data for the PersistentVolumeClaim 'id'.
+func (s *StoreToPVCFetcher) GetPersistentVolumeClaimInfo(namespace string, id string) (*api.PersistentVolumeClaim, error) {
+ o, exists, err := s.Get(&api.PersistentVolumeClaim{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: id}})
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving PersistentVolumeClaim '%s/%s' from cache: %v", namespace, id, err)
+ }
+
+ if !exists {
+ return nil, fmt.Errorf("PersistentVolumeClaim '%s/%s' not found", namespace, id)
+ }
+
+ return o.(*api.PersistentVolumeClaim), nil
+}
+
+// StoreToPetSetLister gives a store List and Exists methods. The store must contain only PetSets.
+type StoreToPetSetLister struct {
+ Store
+}
+
+// Exists checks if the given PetSet exists in the store.
+func (s *StoreToPetSetLister) Exists(ps *apps.PetSet) (bool, error) {
+ _, exists, err := s.Store.Get(ps)
+ if err != nil {
+ return false, err
+ }
+ return exists, nil
+}
+
+// List lists all PetSets in the store.
+func (s *StoreToPetSetLister) List() (psList []apps.PetSet, err error) {
+ for _, ps := range s.Store.List() {
+ psList = append(psList, *(ps.(*apps.PetSet)))
+ }
+ return psList, nil
+}
+
+type storePetSetsNamespacer struct {
+ store Store
+ namespace string
+}
+
+func (s *StoreToPetSetLister) PetSets(namespace string) storePetSetsNamespacer {
+ return storePetSetsNamespacer{s.Store, namespace}
+}
+
+// GetPodPetSets returns a list of PetSets managing a pod. Returns an error only if no matching PetSets are found.
+func (s *StoreToPetSetLister) GetPodPetSets(pod *api.Pod) (psList []apps.PetSet, err error) {
+ var selector labels.Selector
+ var ps apps.PetSet
+
+ if len(pod.Labels) == 0 {
+ err = fmt.Errorf("no PetSets found for pod %v because it has no labels", pod.Name)
+ return
+ }
+
+ for _, m := range s.Store.List() {
+ ps = *m.(*apps.PetSet)
+ if ps.Namespace != pod.Namespace {
+ continue
+ }
+ selector, err = unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
+ if err != nil {
+ err = fmt.Errorf("invalid selector: %v", err)
+ return
+ }
+
+ // If a PetSet with a nil or empty selector creeps in, it should match nothing, not everything.
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
+ continue
+ }
+ psList = append(psList, ps)
+ }
+ if len(psList) == 0 {
+ err = fmt.Errorf("could not find PetSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+ return
+}
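As a usage sketch for these listers, the snippet below backs a StoreToPodLister with a namespace-keyed Indexer and lists by label selector; object names and labels are invented for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	lister := cache.StoreToPodLister{Indexer: indexer}

	indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{
		Name: "web-1", Namespace: "default", Labels: map[string]string{"app": "web"},
	}})

	// Label-filtered list across everything in the store.
	pods, _ := lister.List(labels.Set{"app": "web"}.AsSelector())
	fmt.Println(len(pods), "pod(s) matched")

	// The namespaced path uses the namespace index when it is available.
	podList, _ := lister.Pods("default").List(labels.Everything())
	fmt.Println(len(podList.Items), "pod(s) in default")
}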
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go
new file mode 100644
index 0000000..ff56c0b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "time"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ListFunc knows how to list resources
+type ListFunc func(options api.ListOptions) (runtime.Object, error)
+
+// WatchFunc knows how to watch resources
+type WatchFunc func(options api.ListOptions) (watch.Interface, error)
+
+// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
+// It is a convenience function for users of NewReflector, etc.
+// ListFunc and WatchFunc must not be nil
+type ListWatch struct {
+ ListFunc ListFunc
+ WatchFunc WatchFunc
+}
+
+// Getter interface knows how to access Get method from RESTClient.
+type Getter interface {
+ Get() *restclient.Request
+}
+
+// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
+func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
+ listFunc := func(options api.ListOptions) (runtime.Object, error) {
+ return c.Get().
+ Namespace(namespace).
+ Resource(resource).
+ VersionedParams(&options, api.ParameterCodec).
+ FieldsSelectorParam(fieldSelector).
+ Do().
+ Get()
+ }
+ watchFunc := func(options api.ListOptions) (watch.Interface, error) {
+ return c.Get().
+ Prefix("watch").
+ Namespace(namespace).
+ Resource(resource).
+ VersionedParams(&options, api.ParameterCodec).
+ FieldsSelectorParam(fieldSelector).
+ Watch()
+ }
+ return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
+}
+
+func timeoutFromListOptions(options api.ListOptions) time.Duration {
+ if options.TimeoutSeconds != nil {
+ return time.Duration(*options.TimeoutSeconds) * time.Second
+ }
+ return 0
+}
+
+// List a set of apiserver resources
+func (lw *ListWatch) List(options api.ListOptions) (runtime.Object, error) {
+ return lw.ListFunc(options)
+}
+
+// Watch a set of apiserver resources
+func (lw *ListWatch) Watch(options api.ListOptions) (watch.Interface, error) {
+ return lw.WatchFunc(options)
+}
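A hedged sketch of wiring NewListWatchFromClient; how the Getter-satisfying REST client is obtained is deliberately left out, and the resource name "services" is just an example.

package watchsetup

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/runtime"
)

// NewServiceListWatch builds a ListWatch over Services in the given namespace.
// Any client whose Get() returns a *restclient.Request (the Getter interface
// above) can be passed in.
func NewServiceListWatch(c cache.Getter, namespace string) *cache.ListWatch {
	return cache.NewListWatchFromClient(c, "services", namespace, fields.Everything())
}

// InitialList performs the initial list; a caller would normally record the
// returned list's resource version and start a Watch from it.
func InitialList(lw *cache.ListWatch) (runtime.Object, error) {
	return lw.List(api.ListOptions{})
}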
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go
new file mode 100644
index 0000000..e1af63e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go
@@ -0,0 +1,423 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "net/url"
+ "reflect"
+ "regexp"
+ goruntime "runtime"
+ "runtime/debug"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/api"
+ apierrs "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/runtime"
+ utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+ "k8s.io/kubernetes/pkg/util/wait"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
+type ListerWatcher interface {
+ // List should return a list type object; the Items field will be extracted, and the
+ // ResourceVersion field will be used to start the watch in the right place.
+ List(options api.ListOptions) (runtime.Object, error)
+ // Watch should begin a watch at the specified version.
+ Watch(options api.ListOptions) (watch.Interface, error)
+}
+
+// Reflector watches a specified resource and causes all changes to be reflected in the given store.
+type Reflector struct {
+ // name identifies this reflector. By default it will be a file:line if possible.
+ name string
+
+ // The type of object we expect to place in the store.
+ expectedType reflect.Type
+ // The destination to sync up with the watch source
+ store Store
+ // listerWatcher is used to perform lists and watches.
+ listerWatcher ListerWatcher
+ // period controls timing between one watch ending and
+ // the beginning of the next one.
+ period time.Duration
+ resyncPeriod time.Duration
+ // now() returns current time - exposed for testing purposes
+ now func() time.Time
+ // nextResync is approximate time of next resync (0 if not scheduled)
+ nextResync time.Time
+ // lastSyncResourceVersion is the resource version token last
+ // observed when doing a sync with the underlying store.
+ // It is thread safe, but not synchronized with the underlying store.
+ lastSyncResourceVersion string
+ // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
+ lastSyncResourceVersionMutex sync.RWMutex
+}
+
+var (
+ // We try to spread the load on apiserver by setting timeouts for
+ // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
+ // However, it can be adjusted so that a periodic resync does not break
+ // the TCP connection.
+ minWatchTimeout = 5 * time.Minute
+ // If we are within 'forceResyncThreshold' from the next planned resync
+ // and are just before issuing Watch(), resync will be forced now.
+ forceResyncThreshold = 3 * time.Second
+ // We try to set timeouts for Watch() so that it finishes about
+ // 'timeoutThreshold' before the next planned periodic resync.
+ timeoutThreshold = 1 * time.Second
+)
+
+// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
+// The indexer is configured to key on namespace
+func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
+ indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
+ reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
+ return indexer, reflector
+}
+
+// NewReflector creates a new Reflector object which will keep the given store up to
+// date with the server's contents for the given resource. Reflector promises to
+// only put things in the store that have the type of expectedType, unless expectedType
+// is nil. If resyncPeriod is non-zero, then lists will be executed after every
+// resyncPeriod, so that you can use reflectors to periodically process everything as
+// well as incrementally processing the things that change.
+func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+ return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod)
+}
+
+// NewNamedReflector same as NewReflector, but with a specified name for logging
+func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+ r := &Reflector{
+ name: name,
+ listerWatcher: lw,
+ store: store,
+ expectedType: reflect.TypeOf(expectedType),
+ period: time.Second,
+ resyncPeriod: resyncPeriod,
+ now: time.Now,
+ }
+ return r
+}
+
+// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
+// call chains to NewReflector, so they'd be low-entropy names for reflectors.
+var internalPackages = []string{"kubernetes/pkg/client/cache/", "kubernetes/pkg/controller/framework/", "/runtime/asm_"}
+
+// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages
+// it returns a shortpath/filename:line to aid in identification of this reflector when it starts logging
+func getDefaultReflectorName(ignoredPackages ...string) string {
+ name := "????"
+ const maxStack = 10
+ for i := 1; i < maxStack; i++ {
+ _, file, line, ok := goruntime.Caller(i)
+ if !ok {
+ file, line, ok = extractStackCreator()
+ if !ok {
+ break
+ }
+ i += maxStack
+ }
+ if hasPackage(file, ignoredPackages) {
+ continue
+ }
+
+ file = trimPackagePrefix(file)
+ name = fmt.Sprintf("%s:%d", file, line)
+ break
+ }
+ return name
+}
+
+// hasPackage returns true if the file is in one of the ignored packages.
+func hasPackage(file string, ignoredPackages []string) bool {
+ for _, ignoredPackage := range ignoredPackages {
+ if strings.Contains(file, ignoredPackage) {
+ return true
+ }
+ }
+ return false
+}
+
+// trimPackagePrefix trims duplicate values off the front of a package name.
+func trimPackagePrefix(file string) string {
+ if l := strings.LastIndex(file, "k8s.io/kubernetes/pkg/"); l >= 0 {
+ return file[l+len("k8s.io/kubernetes/"):]
+ }
+ if l := strings.LastIndex(file, "/src/"); l >= 0 {
+ return file[l+5:]
+ }
+ if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
+ return file[l+1:]
+ }
+ return file
+}
+
+var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
+
+// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
+// if the creator cannot be located.
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
+func extractStackCreator() (string, int, bool) {
+ stack := debug.Stack()
+ matches := stackCreator.FindStringSubmatch(string(stack))
+ if matches == nil || len(matches) != 4 {
+ return "", 0, false
+ }
+ line, err := strconv.Atoi(matches[3])
+ if err != nil {
+ return "", 0, false
+ }
+ return matches[2], line, true
+}
+
+// Run starts a watch and handles watch events. Will restart the watch if it is closed.
+// Run starts a goroutine and returns immediately.
+func (r *Reflector) Run() {
+ glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
+ go wait.Until(func() {
+ if err := r.ListAndWatch(wait.NeverStop); err != nil {
+ utilruntime.HandleError(err)
+ }
+ }, r.period, wait.NeverStop)
+}
+
+// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed.
+// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed.
+func (r *Reflector) RunUntil(stopCh <-chan struct{}) {
+ glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
+ go wait.Until(func() {
+ if err := r.ListAndWatch(stopCh); err != nil {
+ utilruntime.HandleError(err)
+ }
+ }, r.period, stopCh)
+}
+
+var (
+ // nothing will ever be sent down this channel
+ neverExitWatch <-chan time.Time = make(chan time.Time)
+
+ // Used to indicate that watching stopped so that a resync could happen.
+ errorResyncRequested = errors.New("resync channel fired")
+
+ // Used to indicate that watching stopped because of a signal from the stop
+ // channel passed in from a client of the reflector.
+ errorStopRequested = errors.New("Stop requested")
+)
+
+// resyncChan returns a channel which will receive something when a resync is
+// required, and a cleanup function.
+func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
+ if r.resyncPeriod == 0 {
+ r.nextResync = time.Time{}
+ return neverExitWatch, func() bool { return false }
+ }
+ // The cleanup function is required: imagine the scenario where watches
+ // always fail so we end up listing frequently. Then, if we don't
+ // manually stop the timer, we could end up with many timers active
+ // concurrently.
+ r.nextResync = r.now().Add(r.resyncPeriod)
+ t := time.NewTimer(r.resyncPeriod)
+ return t.C, t.Stop
+}
+
+// ListAndWatch first lists all items and gets the resource version at the moment of the call,
+// and then uses that resource version to watch.
+// It returns an error if ListAndWatch didn't even try to initialize the watch.
+func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
+ glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
+ var resourceVersion string
+ resyncCh, cleanup := r.resyncChan()
+ defer cleanup()
+
+ // Explicitly set "0" as resource version - it's fine for the List()
+ // to be served from cache and potentially be delayed relative to
+ // etcd contents. Reflector framework will catch up via Watch() eventually.
+ options := api.ListOptions{ResourceVersion: "0"}
+ list, err := r.listerWatcher.List(options)
+ if err != nil {
+ return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+ }
+ listMetaInterface, err := meta.ListAccessor(list)
+ if err != nil {
+ return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
+ }
+ resourceVersion = listMetaInterface.GetResourceVersion()
+ items, err := meta.ExtractList(list)
+ if err != nil {
+ return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
+ }
+ if err := r.syncWith(items, resourceVersion); err != nil {
+ return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
+ }
+ r.setLastSyncResourceVersion(resourceVersion)
+
+ resyncerrc := make(chan error, 1)
+ go func() {
+ for {
+ select {
+ case <-resyncCh:
+ case <-stopCh:
+ return
+ }
+ glog.V(4).Infof("%s: next resync planned for %#v, forcing now", r.name, r.nextResync)
+ if err := r.store.Resync(); err != nil {
+ resyncerrc <- err
+ return
+ }
+ cleanup()
+ resyncCh, cleanup = r.resyncChan()
+ }
+ }()
+
+ for {
+ timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+ options = api.ListOptions{
+ ResourceVersion: resourceVersion,
+ // We want to avoid situations of hanging watchers. Stop any watchers that do not
+ // receive any events within the timeout window.
+ TimeoutSeconds: &timeoutSeconds,
+ }
+
+ w, err := r.listerWatcher.Watch(options)
+ if err != nil {
+ switch err {
+ case io.EOF:
+ // watch closed normally
+ case io.ErrUnexpectedEOF:
+ glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
+ default:
+ utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
+ }
+ // If this is a "connection refused" error, the apiserver is most likely not responsive.
+ // It doesn't make sense to re-list all objects, because we will most likely be able to
+ // restart the watch where we left off.
+ // In that case, wait and resend the watch request.
+ if urlError, ok := err.(*url.Error); ok {
+ if opError, ok := urlError.Err.(*net.OpError); ok {
+ if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
+ time.Sleep(time.Second)
+ continue
+ }
+ }
+ }
+ return nil
+ }
+
+ if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
+ if err != errorStopRequested {
+ glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+ }
+ return nil
+ }
+ }
+}
+
+// syncWith replaces the store's items with the given list.
+func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
+ found := make([]interface{}, 0, len(items))
+ for _, item := range items {
+ found = append(found, item)
+ }
+ return r.store.Replace(found, resourceVersion)
+}
+
+// watchHandler watches w and keeps *resourceVersion up to date.
+func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
+ start := time.Now()
+ eventCount := 0
+
+ // Stopping the watcher should be idempotent and if we return from this function there's no way
+ // we're coming back in with the same watch interface.
+ defer w.Stop()
+
+loop:
+ for {
+ select {
+ case <-stopCh:
+ return errorStopRequested
+ case err := <-errc:
+ return err
+ case event, ok := <-w.ResultChan():
+ if !ok {
+ break loop
+ }
+ if event.Type == watch.Error {
+ return apierrs.FromObject(event.Object)
+ }
+ if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
+ utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
+ continue
+ }
+ meta, err := meta.Accessor(event.Object)
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+ continue
+ }
+ newResourceVersion := meta.GetResourceVersion()
+ switch event.Type {
+ case watch.Added:
+ r.store.Add(event.Object)
+ case watch.Modified:
+ r.store.Update(event.Object)
+ case watch.Deleted:
+ // TODO: Will any consumers need access to the "last known
+ // state", which is passed in event.Object? If so, may need
+ // to change this.
+ r.store.Delete(event.Object)
+ default:
+ utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+ }
+ *resourceVersion = newResourceVersion
+ r.setLastSyncResourceVersion(newResourceVersion)
+ eventCount++
+ }
+ }
+
+ watchDuration := time.Now().Sub(start)
+ if watchDuration < 1*time.Second && eventCount == 0 {
+ glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
+ return errors.New("very short watch")
+ }
+ glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
+ return nil
+}
+
+// LastSyncResourceVersion is the resource version observed during the last sync with the underlying store.
+// The value returned is not synchronized with access to the underlying store and is not thread-safe.
+func (r *Reflector) LastSyncResourceVersion() string {
+ r.lastSyncResourceVersionMutex.RLock()
+ defer r.lastSyncResourceVersionMutex.RUnlock()
+ return r.lastSyncResourceVersion
+}
+
+func (r *Reflector) setLastSyncResourceVersion(v string) {
+ r.lastSyncResourceVersionMutex.Lock()
+ defer r.lastSyncResourceVersionMutex.Unlock()
+ r.lastSyncResourceVersion = v
+}
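
A short sketch of how the pieces defined in this package are typically wired together: a ListWatch as the source, a Store as the destination, and a Reflector pumping changes between them. The names "example" and "coreClient" and the 30-second resync period are illustrative assumptions:

    package example

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/cache"
        "k8s.io/kubernetes/pkg/client/restclient"
        "k8s.io/kubernetes/pkg/fields"
    )

    // mirrorServices keeps a local Store in sync with the cluster's services.
    func mirrorServices(coreClient *restclient.RESTClient) cache.Store {
        store := cache.NewStore(cache.MetaNamespaceKeyFunc)
        lw := cache.NewListWatchFromClient(coreClient, "services", api.NamespaceAll, fields.Everything())
        // Only *api.Service objects are expected to land in the store.
        r := cache.NewReflector(lw, &api.Service{}, store, 30*time.Second)
        r.Run() // starts a goroutine and returns immediately
        return store
    }
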
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/store.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/store.go
new file mode 100644
index 0000000..4cd2479
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/store.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "fmt"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/meta"
+)
+
+// Store is a generic object storage interface. Reflector knows how to watch a server
+// and update a store. A generic store is provided, which allows Reflector to be used
+// as a local caching system, and an LRU store, which allows Reflector to work like a
+// queue of items yet to be processed.
+//
+// Store makes no assumptions about stored object identity; it is the responsibility
+// of a Store implementation to provide a mechanism to correctly key objects and to
+// define the contract for obtaining objects by some arbitrary key type.
+type Store interface {
+ Add(obj interface{}) error
+ Update(obj interface{}) error
+ Delete(obj interface{}) error
+ List() []interface{}
+ ListKeys() []string
+ Get(obj interface{}) (item interface{}, exists bool, err error)
+ GetByKey(key string) (item interface{}, exists bool, err error)
+
+ // Replace will delete the contents of the store, using instead the
+ // given list. Store takes ownership of the list; you should not reference
+ // it after calling this function.
+ Replace([]interface{}, string) error
+ Resync() error
+}
+
+// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
+type KeyFunc func(obj interface{}) (string, error)
+
+// KeyError will be returned any time a KeyFunc gives an error; it includes the object
+// at fault.
+type KeyError struct {
+ Obj interface{}
+ Err error
+}
+
+// Error gives a human-readable description of the error.
+func (k KeyError) Error() string {
+ return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
+}
+
+// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
+// the object but not the object itself.
+type ExplicitKey string
+
+// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
+// keys for API objects which implement meta.Interface.
+// The key uses the format <namespace>/<name> unless <namespace> is empty, then
+// it's just <name>.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
+ if key, ok := obj.(ExplicitKey); ok {
+ return string(key), nil
+ }
+ meta, err := meta.Accessor(obj)
+ if err != nil {
+ return "", fmt.Errorf("object has no meta: %v", err)
+ }
+ if len(meta.GetNamespace()) > 0 {
+ return meta.GetNamespace() + "/" + meta.GetName(), nil
+ }
+ return meta.GetName(), nil
+}
+
+// SplitMetaNamespaceKey returns the namespace and name that
+// MetaNamespaceKeyFunc encoded into key.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
+ parts := strings.Split(key, "/")
+ switch len(parts) {
+ case 1:
+ // name only, no namespace
+ return "", parts[0], nil
+ case 2:
+ // namespace and name
+ return parts[0], parts[1], nil
+ }
+
+ return "", "", fmt.Errorf("unexpected key format: %q", key)
+}
+
+// cache responsibilities are limited to:
+// 1. Computing keys for objects via keyFunc
+// 2. Invoking methods of a ThreadSafeStore interface
+type cache struct {
+ // cacheStorage bears the burden of thread safety for the cache
+ cacheStorage ThreadSafeStore
+ // keyFunc is used to make the key for objects stored in and retrieved from items, and
+ // should be deterministic.
+ keyFunc KeyFunc
+}
+
+var _ Store = &cache{}
+
+// Add inserts an item into the cache.
+func (c *cache) Add(obj interface{}) error {
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ c.cacheStorage.Add(key, obj)
+ return nil
+}
+
+// Update sets an item in the cache to its updated state.
+func (c *cache) Update(obj interface{}) error {
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ c.cacheStorage.Update(key, obj)
+ return nil
+}
+
+// Delete removes an item from the cache.
+func (c *cache) Delete(obj interface{}) error {
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ c.cacheStorage.Delete(key)
+ return nil
+}
+
+// List returns a list of all the items.
+// List is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) List() []interface{} {
+ return c.cacheStorage.List()
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the cache.
+func (c *cache) ListKeys() []string {
+ return c.cacheStorage.ListKeys()
+}
+
+// GetIndexers returns the indexers of cache
+func (c *cache) GetIndexers() Indexers {
+ return c.cacheStorage.GetIndexers()
+}
+
+// Index returns a list of items that match on the index function
+// Index is thread-safe so long as you treat all items as immutable
+func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) {
+ return c.cacheStorage.Index(indexName, obj)
+}
+
+// ListIndexFuncValues returns the list of generated values of an Index func
+func (c *cache) ListIndexFuncValues(indexName string) []string {
+ return c.cacheStorage.ListIndexFuncValues(indexName)
+}
+
+func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
+ return c.cacheStorage.ByIndex(indexName, indexKey)
+}
+
+func (c *cache) AddIndexers(newIndexers Indexers) error {
+ return c.cacheStorage.AddIndexers(newIndexers)
+}
+
+// Get returns the requested item, or sets exists=false.
+// Get is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
+ key, err := c.keyFunc(obj)
+ if err != nil {
+ return nil, false, KeyError{obj, err}
+ }
+ return c.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or exists=false.
+// GetByKey is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
+ item, exists = c.cacheStorage.Get(key)
+ return item, exists, nil
+}
+
+// Replace will delete the contents of 'c', using instead the given list.
+// 'c' takes ownership of the list; you should not reference the list again
+// after calling this function.
+func (c *cache) Replace(list []interface{}, resourceVersion string) error {
+ items := map[string]interface{}{}
+ for _, item := range list {
+ key, err := c.keyFunc(item)
+ if err != nil {
+ return KeyError{item, err}
+ }
+ items[key] = item
+ }
+ c.cacheStorage.Replace(items, resourceVersion)
+ return nil
+}
+
+// Resync touches all items in the store to force processing
+func (c *cache) Resync() error {
+ return c.cacheStorage.Resync()
+}
+
+// NewStore returns a Store implemented simply with a map and a lock.
+func NewStore(keyFunc KeyFunc) Store {
+ return &cache{
+ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
+ keyFunc: keyFunc,
+ }
+}
+
+// NewIndexer returns an Indexer implemented simply with a map and a lock.
+func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer {
+ return &cache{
+ cacheStorage: NewThreadSafeStore(indexers, Indices{}),
+ keyFunc: keyFunc,
+ }
+}
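
A sketch of the <namespace>/<name> keying convention implemented by MetaNamespaceKeyFunc and SplitMetaNamespaceKey; the package name and the pod values are illustrative only:

    package example

    import (
        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/cache"
    )

    func keyingExample() {
        store := cache.NewStore(cache.MetaNamespaceKeyFunc)

        pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "kube-system", Name: "kube-dns"}}
        _ = store.Add(pod) // stored under the key "kube-system/kube-dns"

        // ExplicitKey allows a lookup when only the key string is known.
        if item, exists, _ := store.Get(cache.ExplicitKey("kube-system/kube-dns")); exists {
            _ = item
        }

        // The key splits back into its parts: "kube-system", "kube-dns".
        ns, name, _ := cache.SplitMetaNamespaceKey("kube-system/kube-dns")
        _, _ = ns, name
    }
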
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go
new file mode 100644
index 0000000..9d88ce3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go
@@ -0,0 +1,288 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "fmt"
+ "sync"
+
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
+// TL;DR caveats: you must not modify anything returned by Get or List as it will break
+// the indexing feature in addition to not being thread safe.
+//
+// The guarantees of thread safety provided by List/Get are only valid if the caller
+// treats returned items as read-only. For example, a pointer inserted in the store
+// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get`
+// on the same key and modify the pointer in a non-thread-safe way. Also note that
+// modifying objects stored by the indexers (if any) will *not* automatically lead
+// to a re-index. So it's not a good idea to directly modify the objects returned by
+// Get/List, in general.
+type ThreadSafeStore interface {
+ Add(key string, obj interface{})
+ Update(key string, obj interface{})
+ Delete(key string)
+ Get(key string) (item interface{}, exists bool)
+ List() []interface{}
+ ListKeys() []string
+ Replace(map[string]interface{}, string)
+ Index(indexName string, obj interface{}) ([]interface{}, error)
+ ListIndexFuncValues(name string) []string
+ ByIndex(indexName, indexKey string) ([]interface{}, error)
+ GetIndexers() Indexers
+
+ // AddIndexers adds more indexers to this store. If you call this after you already have data
+ // in the store, the results are undefined.
+ AddIndexers(newIndexers Indexers) error
+ Resync() error
+}
+
+// threadSafeMap implements ThreadSafeStore
+type threadSafeMap struct {
+ lock sync.RWMutex
+ items map[string]interface{}
+
+ // indexers maps a name to an IndexFunc
+ indexers Indexers
+ // indices maps a name to an Index
+ indices Indices
+}
+
+func (c *threadSafeMap) Add(key string, obj interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ oldObject := c.items[key]
+ c.items[key] = obj
+ c.updateIndices(oldObject, obj, key)
+}
+
+func (c *threadSafeMap) Update(key string, obj interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ oldObject := c.items[key]
+ c.items[key] = obj
+ c.updateIndices(oldObject, obj, key)
+}
+
+func (c *threadSafeMap) Delete(key string) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if obj, exists := c.items[key]; exists {
+ c.deleteFromIndices(obj, key)
+ delete(c.items, key)
+ }
+}
+
+func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ item, exists = c.items[key]
+ return item, exists
+}
+
+func (c *threadSafeMap) List() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ list := make([]interface{}, 0, len(c.items))
+ for _, item := range c.items {
+ list = append(list, item)
+ }
+ return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the threadSafeMap.
+func (c *threadSafeMap) ListKeys() []string {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ list := make([]string, 0, len(c.items))
+ for key := range c.items {
+ list = append(list, key)
+ }
+ return list
+}
+
+func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.items = items
+
+ // rebuild any index
+ c.indices = Indices{}
+ for key, item := range c.items {
+ c.updateIndices(nil, item, key)
+ }
+}
+
+// Index returns a list of items that match on the index function
+// Index is thread-safe so long as you treat all items as immutable
+func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ indexFunc := c.indexers[indexName]
+ if indexFunc == nil {
+ return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+ }
+
+ indexKeys, err := indexFunc(obj)
+ if err != nil {
+ return nil, err
+ }
+ index := c.indices[indexName]
+
+ // need to de-dupe the return list. Since multiple keys are allowed, this can happen.
+ returnKeySet := sets.String{}
+ for _, indexKey := range indexKeys {
+ set := index[indexKey]
+ for _, key := range set.List() {
+ returnKeySet.Insert(key)
+ }
+ }
+
+ list := make([]interface{}, 0, returnKeySet.Len())
+ for absoluteKey := range returnKeySet {
+ list = append(list, c.items[absoluteKey])
+ }
+ return list, nil
+}
+
+// ByIndex returns a list of items that match an exact value on the index function
+func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ indexFunc := c.indexers[indexName]
+ if indexFunc == nil {
+ return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+ }
+
+ index := c.indices[indexName]
+
+ set := index[indexKey]
+ list := make([]interface{}, 0, set.Len())
+ for _, key := range set.List() {
+ list = append(list, c.items[key])
+ }
+
+ return list, nil
+}
+
+func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ index := c.indices[indexName]
+ names := make([]string, 0, len(index))
+ for key := range index {
+ names = append(names, key)
+ }
+ return names
+}
+
+func (c *threadSafeMap) GetIndexers() Indexers {
+ return c.indexers
+}
+
+func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if len(c.items) > 0 {
+ return fmt.Errorf("cannot add indexers to running index")
+ }
+
+ oldKeys := sets.StringKeySet(c.indexers)
+ newKeys := sets.StringKeySet(newIndexers)
+
+ if oldKeys.HasAny(newKeys.List()...) {
+ return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
+ }
+
+ for k, v := range newIndexers {
+ c.indexers[k] = v
+ }
+ return nil
+}
+
+// updateIndices modifies the object's location in the managed indexes; if this is an update, you must provide an oldObj.
+// updateIndices must be called from a function that already has a lock on the cache
+func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error {
+ // if we got an old object, we need to remove it before we add it again
+ if oldObj != nil {
+ c.deleteFromIndices(oldObj, key)
+ }
+ for name, indexFunc := range c.indexers {
+ indexValues, err := indexFunc(newObj)
+ if err != nil {
+ return err
+ }
+ index := c.indices[name]
+ if index == nil {
+ index = Index{}
+ c.indices[name] = index
+ }
+
+ for _, indexValue := range indexValues {
+ set := index[indexValue]
+ if set == nil {
+ set = sets.String{}
+ index[indexValue] = set
+ }
+ set.Insert(key)
+ }
+ }
+ return nil
+}
+
+// deleteFromIndices removes the object from each of the managed indexes
+// it is intended to be called from a function that already has a lock on the cache
+func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
+ for name, indexFunc := range c.indexers {
+ indexValues, err := indexFunc(obj)
+ if err != nil {
+ return err
+ }
+
+ index := c.indices[name]
+ if index == nil {
+ continue
+ }
+ for _, indexValue := range indexValues {
+ set := index[indexValue]
+ if set != nil {
+ set.Delete(key)
+ }
+ }
+ }
+ return nil
+}
+
+func (c *threadSafeMap) Resync() error {
+ // Nothing to do
+ return nil
+}
+
+func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
+ return &threadSafeMap{
+ items: map[string]interface{}{},
+ indexers: indexers,
+ indices: indices,
+ }
+}
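
A sketch of ThreadSafeStore with a custom indexer. The widget type, the "color" index, and the index function are illustrative; the function's signature follows how index functions are invoked above (the IndexFunc type itself is declared elsewhere in this package):

    package example

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/client/cache"
    )

    type widget struct {
        Name  string
        Color string
    }

    // byColor groups widgets by their Color field.
    func byColor(obj interface{}) ([]string, error) {
        w, ok := obj.(*widget)
        if !ok {
            return nil, fmt.Errorf("expected *widget, got %T", obj)
        }
        return []string{w.Color}, nil
    }

    func indexExample() error {
        s := cache.NewThreadSafeStore(cache.Indexers{"color": byColor}, cache.Indices{})
        s.Add("w1", &widget{Name: "w1", Color: "red"})
        s.Add("w2", &widget{Name: "w2", Color: "blue"})

        reds, err := s.ByIndex("color", "red") // -> the "w1" widget only
        if err != nil {
            return err
        }
        _ = reds
        return nil
    }
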
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go
new file mode 100644
index 0000000..117df46
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// UndeltaStore listens to incremental updates and sends complete state on every change.
+// It implements the Store interface so that it can receive a stream of mirrored objects
+// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change
+// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
+// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results
+// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
+// PushFunc should be thread safe.
+type UndeltaStore struct {
+ Store
+ PushFunc func([]interface{})
+}
+
+// Assert that it implements the Store interface.
+var _ Store = &UndeltaStore{}
+
+// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
+// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
+// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
+// time thread 1 thread 2
+// 0 UndeltaStore.Add(a)
+// 1 UndeltaStore.Add(b)
+// 2 Store.Add(a)
+// 3 Store.Add(b)
+// 4 Store.List() -> [a,b]
+// 5 Store.List() -> [a,b]
+
+func (u *UndeltaStore) Add(obj interface{}) error {
+ if err := u.Store.Add(obj); err != nil {
+ return err
+ }
+ u.PushFunc(u.Store.List())
+ return nil
+}
+
+func (u *UndeltaStore) Update(obj interface{}) error {
+ if err := u.Store.Update(obj); err != nil {
+ return err
+ }
+ u.PushFunc(u.Store.List())
+ return nil
+}
+
+func (u *UndeltaStore) Delete(obj interface{}) error {
+ if err := u.Store.Delete(obj); err != nil {
+ return err
+ }
+ u.PushFunc(u.Store.List())
+ return nil
+}
+
+func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
+ if err := u.Store.Replace(list, resourceVersion); err != nil {
+ return err
+ }
+ u.PushFunc(u.Store.List())
+ return nil
+}
+
+// NewUndeltaStore returns an UndeltaStore implemented with a Store.
+func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
+ return &UndeltaStore{
+ Store: NewStore(keyFunc),
+ PushFunc: pushFunc,
+ }
+}
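
A sketch of UndeltaStore: every Add/Update/Delete/Replace pushes the complete current contents to the callback. The publish callback is an assumption standing in for whatever consumer wants full-state snapshots:

    package example

    import "k8s.io/kubernetes/pkg/client/cache"

    // newSnapshotStore returns a store that republishes its full contents on
    // every change; a Reflector can be pointed at the returned store.
    func newSnapshotStore(publish func(fullState []interface{})) *cache.UndeltaStore {
        return cache.NewUndeltaStore(publish, cache.MetaNamespaceKeyFunc)
    }
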
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go
new file mode 100644
index 0000000..53029b5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package metrics provides utilities for registering client metrics to Prometheus.
+package metrics
+
+import (
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const restClientSubsystem = "rest_client"
+
+var (
+ // RequestLatency is a Prometheus Summary metric type partitioned by
+ // "verb" and "url" labels. It is used for the rest client latency metrics.
+ RequestLatency = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Subsystem: restClientSubsystem,
+ Name: "request_latency_microseconds",
+ Help: "Request latency in microseconds. Broken down by verb and URL",
+ MaxAge: time.Hour,
+ },
+ []string{"verb", "url"},
+ )
+
+ RequestResult = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Subsystem: restClientSubsystem,
+ Name: "request_status_codes",
+ Help: "Number of http requests, partitioned by metadata",
+ },
+ []string{"code", "method", "host"},
+ )
+)
+
+var registerMetrics sync.Once
+
+// Register registers the rest client metrics (RequestLatency and
+// RequestResult) with Prometheus.
+func Register() {
+ // Register the metrics.
+ registerMetrics.Do(func() {
+ prometheus.MustRegister(RequestLatency)
+ prometheus.MustRegister(RequestResult)
+ })
+}
+
+// SinceInMicroseconds calculates the time elapsed since the specified start, in microseconds.
+func SinceInMicroseconds(start time.Time) float64 {
+ return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
+}
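
A sketch of how a caller might feed these collectors, using the standard Prometheus vector accessors; the verb, URL, code, and host label values are illustrative only:

    package example

    import (
        "time"

        "k8s.io/kubernetes/pkg/client/metrics"
    )

    func recordRequest() {
        metrics.Register() // idempotent thanks to the sync.Once above

        start := time.Now()
        // ... perform the HTTP request here ...
        metrics.RequestLatency.WithLabelValues("GET", "/api/v1/pods").
            Observe(metrics.SinceInMicroseconds(start))
        metrics.RequestResult.WithLabelValues("200", "GET", "apiserver.example").Inc()
    }
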
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go
new file mode 100644
index 0000000..24ad191
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go
@@ -0,0 +1,224 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/flowcontrol"
+)
+
+const (
+ // Environment variables: Note that the duration should be long enough that the backoff
+ // persists for some reasonable time (e.g. 120 seconds). The typical base might be "1".
+ envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE"
+ envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION"
+)
+
+// RESTClient imposes common Kubernetes API conventions on a set of resource paths.
+// The baseURL is expected to point to an HTTP or HTTPS path that is the parent
+// of one or more resources. The server should return a decodable API resource
+// object, or an api.Status object which contains information about the reason for
+// any failure.
+//
+// Most consumers should use client.New() to get a Kubernetes API client.
+type RESTClient struct {
+ // base is the root URL for all invocations of the client
+ base *url.URL
+ // versionedAPIPath is a path segment connecting the base URL to the resource root
+ versionedAPIPath string
+
+ // contentConfig is the information used to communicate with the server.
+ contentConfig ContentConfig
+
+ // serializers contain all serializers for the underlying content type.
+ serializers Serializers
+
+ // createBackoffMgr creates a BackoffManager that is passed to requests.
+ createBackoffMgr func() BackoffManager
+
+ // TODO extract this into a wrapper interface via the RESTClient interface in kubectl.
+ Throttle flowcontrol.RateLimiter
+
+ // Set specific behavior of the client. If not set http.DefaultClient will be used.
+ Client *http.Client
+}
+
+type Serializers struct {
+ Encoder runtime.Encoder
+ Decoder runtime.Decoder
+ StreamingSerializer runtime.Serializer
+ Framer runtime.Framer
+ RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error)
+}
+
+// NewRESTClient creates a new RESTClient. This client performs generic REST functions
+// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
+// decoding of responses from the server.
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
+ base := *baseURL
+ if !strings.HasSuffix(base.Path, "/") {
+ base.Path += "/"
+ }
+ base.RawQuery = ""
+ base.Fragment = ""
+
+ if config.GroupVersion == nil {
+ config.GroupVersion = &unversioned.GroupVersion{}
+ }
+ if len(config.ContentType) == 0 {
+ config.ContentType = "application/json"
+ }
+ serializers, err := createSerializers(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var throttle flowcontrol.RateLimiter
+ if maxQPS > 0 && rateLimiter == nil {
+ throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
+ } else if rateLimiter != nil {
+ throttle = rateLimiter
+ }
+ return &RESTClient{
+ base: &base,
+ versionedAPIPath: versionedAPIPath,
+ contentConfig: config,
+ serializers: *serializers,
+ createBackoffMgr: readExpBackoffConfig,
+ Throttle: throttle,
+ Client: client,
+ }, nil
+}
+
+// GetRateLimiter returns the rate limiter for a given client, or nil if it's called on a nil client.
+func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
+ if c == nil {
+ return nil
+ }
+ return c.Throttle
+}
+
+// readExpBackoffConfig handles the internal logic of determining what the
+// backoff policy is. If no information is available, it defaults to NoBackoff.
+// TODO Generalize this see #17727 .
+func readExpBackoffConfig() BackoffManager {
+ backoffBase := os.Getenv(envBackoffBase)
+ backoffDuration := os.Getenv(envBackoffDuration)
+
+ backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
+ backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
+ if errBase != nil || errDuration != nil {
+ return &NoBackoff{}
+ }
+ return &URLBackoff{
+ Backoff: flowcontrol.NewBackOff(
+ time.Duration(backoffBaseInt)*time.Second,
+ time.Duration(backoffDurationInt)*time.Second)}
+}
+
+// createSerializers creates all necessary serializers for given contentType.
+func createSerializers(config ContentConfig) (*Serializers, error) {
+ negotiated := config.NegotiatedSerializer
+ contentType := config.ContentType
+ info, ok := negotiated.SerializerForMediaType(contentType, nil)
+ if !ok {
+ return nil, fmt.Errorf("serializer for %s not registered", contentType)
+ }
+ streamInfo, ok := negotiated.StreamingSerializerForMediaType(contentType, nil)
+ if !ok {
+ return nil, fmt.Errorf("streaming serializer for %s not registered", contentType)
+ }
+ internalGV := unversioned.GroupVersion{
+ Group: config.GroupVersion.Group,
+ Version: runtime.APIVersionInternal,
+ }
+ return &Serializers{
+ Encoder: negotiated.EncoderForVersion(info.Serializer, *config.GroupVersion),
+ Decoder: negotiated.DecoderToVersion(info.Serializer, internalGV),
+ StreamingSerializer: streamInfo.Serializer,
+ Framer: streamInfo.Framer,
+ RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
+ renegotiated, ok := negotiated.SerializerForMediaType(contentType, params)
+ if !ok {
+ return nil, fmt.Errorf("serializer for %s not registered", contentType)
+ }
+ return negotiated.DecoderToVersion(renegotiated.Serializer, internalGV), nil
+ },
+ }, nil
+}
+
+// Verb begins a request with a verb (GET, POST, PUT, DELETE).
+//
+// Example usage of RESTClient's request building interface:
+// c, err := NewRESTClient(...)
+// if err != nil { ... }
+// resp, err := c.Verb("GET").
+// Path("pods").
+// SelectorParam("labels", "area=staging").
+// Timeout(10*time.Second).
+// Do()
+// if err != nil { ... }
+// list, ok := resp.(*api.PodList)
+//
+func (c *RESTClient) Verb(verb string) *Request {
+ backoff := c.createBackoffMgr()
+
+ if c.Client == nil {
+ return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle)
+ }
+ return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle)
+}
+
+// Post begins a POST request. Short for c.Verb("POST").
+func (c *RESTClient) Post() *Request {
+ return c.Verb("POST")
+}
+
+// Put begins a PUT request. Short for c.Verb("PUT").
+func (c *RESTClient) Put() *Request {
+ return c.Verb("PUT")
+}
+
+// Patch begins a PATCH request. Short for c.Verb("PATCH").
+func (c *RESTClient) Patch(pt api.PatchType) *Request {
+ return c.Verb("PATCH").SetHeader("Content-Type", string(pt))
+}
+
+// Get begins a GET request. Short for c.Verb("GET").
+func (c *RESTClient) Get() *Request {
+ return c.Verb("GET")
+}
+
+// Delete begins a DELETE request. Short for c.Verb("DELETE").
+func (c *RESTClient) Delete() *Request {
+ return c.Verb("DELETE")
+}
+
+// APIVersion returns the APIVersion this RESTClient is expected to use.
+func (c *RESTClient) APIVersion() unversioned.GroupVersion {
+ return *c.contentConfig.GroupVersion
+}
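
A sketch of the fluent request-building interface, mirroring the Get()/Namespace()/Resource()/Do()/Get() chain already used by NewListWatchFromClient earlier in this diff; "client" is assumed to be an already-configured *RESTClient:

    package example

    import (
        "k8s.io/kubernetes/pkg/client/restclient"
        "k8s.io/kubernetes/pkg/runtime"
    )

    // getPods fetches the pod list object for a namespace.
    func getPods(client *restclient.RESTClient, namespace string) (runtime.Object, error) {
        return client.Get().
            Namespace(namespace).
            Resource("pods").
            Do().
            Get()
    }
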
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go
new file mode 100644
index 0000000..fec5f49
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go
@@ -0,0 +1,328 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ gruntime "runtime"
+ "strings"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/crypto"
+ "k8s.io/kubernetes/pkg/util/flowcontrol"
+ "k8s.io/kubernetes/pkg/version"
+)
+
+const (
+ DefaultQPS float32 = 5.0
+ DefaultBurst int = 10
+)
+
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type Config struct {
+ // Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must
+ // be appended to all request URIs used to access the apiserver. This allows a frontend
+ // proxy to easily relocate all of the apiserver endpoints.
+ Host string
+ // APIPath is a sub-path that points to an API root.
+ APIPath string
+ // Prefix is the sub path of the server. If not specified, the client will set
+ // a default value. Use "/" to indicate the server root should be used
+ Prefix string
+
+ // ContentConfig contains settings that affect how objects are transformed when
+ // sent to the server.
+ ContentConfig
+
+ // Server requires Basic authentication
+ Username string
+ Password string
+
+ // Server requires Bearer authentication. This client will not attempt to use
+ // refresh tokens for an OAuth2 flow.
+ // TODO: demonstrate an OAuth2 compatible client.
+ BearerToken string
+
+ // Impersonate is the username that this RESTClient will impersonate
+ Impersonate string
+
+ // Server requires plugin-specified authentication.
+ AuthProvider *clientcmdapi.AuthProviderConfig
+
+ // Callback to persist config for AuthProvider.
+ AuthConfigPersister AuthProviderConfigPersister
+
+ // TLSClientConfig contains settings to enable transport layer security
+ TLSClientConfig
+
+ // Server should be accessed without verifying the TLS
+ // certificate. For testing only.
+ Insecure bool
+
+ // UserAgent is an optional field that specifies the caller of this request.
+ UserAgent string
+
+ // Transport may be used for custom HTTP behavior. This attribute may not
+ // be specified with the TLS client certificate options. Use WrapTransport
+ // for most client level operations.
+ Transport http.RoundTripper
+ // WrapTransport will be invoked for custom HTTP behavior after the underlying
+ // transport is initialized (either the transport created from TLSClientConfig,
+ // Transport, or http.DefaultTransport). The config may layer other RoundTrippers
+ // on top of the returned RoundTripper.
+ WrapTransport func(rt http.RoundTripper) http.RoundTripper
+
+ // QPS indicates the maximum QPS to the master from this client.
+ // If it's zero, the created RESTClient will use DefaultQPS: 5
+ QPS float32
+
+ // Maximum burst for throttle.
+ // If it's zero, the created RESTClient will use DefaultBurst: 10.
+ Burst int
+
+ // Rate limiter for limiting connections to the master from this client. If present, it overrides QPS/Burst.
+ RateLimiter flowcontrol.RateLimiter
+}
+
+// TLSClientConfig contains settings to enable transport layer security
+type TLSClientConfig struct {
+ // Server requires TLS client certificate authentication
+ CertFile string
+ // Server requires TLS client certificate authentication
+ KeyFile string
+ // Trusted root certificates for server
+ CAFile string
+
+ // CertData holds PEM-encoded bytes (typically read from a client certificate file).
+ // CertData takes precedence over CertFile
+ CertData []byte
+ // KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+ // KeyData takes precedence over KeyFile
+ KeyData []byte
+ // CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+ // CAData takes precedence over CAFile
+ CAData []byte
+}
+
+type ContentConfig struct {
+ // ContentType specifies the wire format used to communicate with the server.
+ // This value will be set as the Accept header on requests made to the server, and
+ // as the default content type on any object sent to the server. If not set,
+ // "application/json" is used.
+ ContentType string
+ // GroupVersion is the API version to talk to. Must be provided when initializing
+ // a RESTClient directly. When initializing a Client, will be set with the default
+ // code version.
+ GroupVersion *unversioned.GroupVersion
+ // NegotiatedSerializer is used for obtaining encoders and decoders for multiple
+ // supported media types.
+ NegotiatedSerializer runtime.NegotiatedSerializer
+}
+
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func RESTClientFor(config *Config) (*RESTClient, error) {
+ if config.GroupVersion == nil {
+ return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
+ }
+ if config.NegotiatedSerializer == nil {
+ return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+ }
+ qps := config.QPS
+ if config.QPS == 0.0 {
+ qps = DefaultQPS
+ }
+ burst := config.Burst
+ if config.Burst == 0 {
+ burst = DefaultBurst
+ }
+
+ baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ transport, err := TransportFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var httpClient *http.Client
+ if transport != http.DefaultTransport {
+ httpClient = &http.Client{Transport: transport}
+ }
+
+ return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
+}
+
+// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
+// the config.Version to be empty.
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
+ if config.NegotiatedSerializer == nil {
+ return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient")
+ }
+
+ baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ transport, err := TransportFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var httpClient *http.Client
+ if transport != http.DefaultTransport {
+ httpClient = &http.Client{Transport: transport}
+ }
+
+ versionConfig := config.ContentConfig
+ if versionConfig.GroupVersion == nil {
+ v := unversioned.SchemeGroupVersion
+ versionConfig.GroupVersion = &v
+ }
+
+ return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
+}
+
+// SetKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+func SetKubernetesDefaults(config *Config) error {
+ if len(config.UserAgent) == 0 {
+ config.UserAgent = DefaultKubernetesUserAgent()
+ }
+ return nil
+}
+
+// DefaultKubernetesUserAgent returns the default user agent that clients can use.
+func DefaultKubernetesUserAgent() string {
+ commit := version.Get().GitCommit
+ if len(commit) > 7 {
+ commit = commit[:7]
+ }
+ if len(commit) == 0 {
+ commit = "unknown"
+ }
+ version := version.Get().GitVersion
+ seg := strings.SplitN(version, "-", 2)
+ version = seg[0]
+ return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit)
+}
+
+// InClusterConfig returns a config object which uses the service account
+// kubernetes gives to pods. It's intended for clients that expect to be
+// running inside a pod running on kuberenetes. It will return an error if
+// called from a process not running in a kubernetes environment.
+func InClusterConfig() (*Config, error) {
+ host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+ if len(host) == 0 || len(port) == 0 {
+ return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+ }
+
+ token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey)
+ if err != nil {
+ return nil, err
+ }
+ tlsClientConfig := TLSClientConfig{}
+ rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey
+ if _, err := crypto.CertPoolFromFile(rootCAFile); err != nil {
+ glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+ } else {
+ tlsClientConfig.CAFile = rootCAFile
+ }
+
+ return &Config{
+ // TODO: switch to using cluster DNS.
+ Host: "https://" + net.JoinHostPort(host, port),
+ BearerToken: string(token),
+ TLSClientConfig: tlsClientConfig,
+ }, nil
+}
+
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor(). Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
+func IsConfigTransportTLS(config Config) bool {
+ baseURL, _, err := defaultServerUrlFor(&config)
+ if err != nil {
+ return false
+ }
+ return baseURL.Scheme == "https"
+}
+
+// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func LoadTLSFiles(c *Config) error {
+ var err error
+ c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+ if err != nil {
+ return err
+ }
+
+ c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+ if err != nil {
+ return err
+ }
+
+ c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+ if len(data) > 0 {
+ return data, nil
+ }
+ if len(file) > 0 {
+ fileData, err := ioutil.ReadFile(file)
+ if err != nil {
+ return []byte{}, err
+ }
+ return fileData, nil
+ }
+ return nil, nil
+}
+
+func AddUserAgent(config *Config, userAgent string) *Config {
+ fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
+ config.UserAgent = fullUserAgent
+ return config
+}
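
A sketch combining the helpers defined in this file for a client running inside a pod; the "kube2msb" user agent suffix is illustrative:

    package example

    import "k8s.io/kubernetes/pkg/client/restclient"

    func inClusterSketch() (*restclient.Config, error) {
        config, err := restclient.InClusterConfig()
        if err != nil {
            return nil, err
        }
        restclient.AddUserAgent(config, "kube2msb")

        // A caller may want to refuse to send credentials over a non-TLS transport.
        secure := restclient.IsConfigTransportTLS(*config)
        _ = secure
        return config, nil
    }
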
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go
new file mode 100644
index 0000000..06ac3cc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "github.com/golang/glog"
+
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+type AuthProvider interface {
+ // WrapTransport allows the plugin to create a modified RoundTripper that
+ // attaches authorization headers (or other info) to requests.
+ WrapTransport(http.RoundTripper) http.RoundTripper
+ // Login allows the plugin to initialize its configuration. It must not
+ // require direct user interaction.
+ Login() error
+}
+
+// Factory generates an AuthProvider plugin.
+// clusterAddress is the address of the current cluster.
+// config is the initial configuration for this plugin.
+// persister allows the plugin to save updated configuration.
+type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)
+
+// AuthProviderConfigPersister allows a plugin to persist configuration info
+// for just itself.
+type AuthProviderConfigPersister interface {
+ Persist(map[string]string) error
+}
+
+// All registered auth provider plugins.
+var pluginsLock sync.Mutex
+var plugins = make(map[string]Factory)
+
+func RegisterAuthProviderPlugin(name string, plugin Factory) error {
+ pluginsLock.Lock()
+ defer pluginsLock.Unlock()
+ if _, found := plugins[name]; found {
+ return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
+ }
+ glog.V(4).Infof("Registered Auth Provider Plugin %q", name)
+ plugins[name] = plugin
+ return nil
+}
+
+func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) {
+ pluginsLock.Lock()
+ defer pluginsLock.Unlock()
+ p, ok := plugins[apc.Name]
+ if !ok {
+ return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name)
+ }
+ return p(clusterAddress, apc.Config, persister)
+}
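+
+// Example (illustrative sketch; "example-oidc" and newExampleProvider are
+// hypothetical names, not real plugins): a plugin package registers itself
+// from init, and RESTClient construction later resolves it via GetAuthProvider:
+//
+//	func init() {
+//		if err := RegisterAuthProviderPlugin("example-oidc", newExampleProvider); err != nil {
+//			glog.Fatalf("failed to register auth provider plugin: %v", err)
+//		}
+//	}
+//
+//	func newExampleProvider(clusterAddress string, cfg map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) {
+//		// Return a value whose WrapTransport attaches Authorization headers to every request.
+//		return nil, fmt.Errorf("not implemented")
+//	}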
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go
new file mode 100644
index 0000000..51fac6b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go
@@ -0,0 +1,1086 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/api/validation"
+ "k8s.io/kubernetes/pkg/client/metrics"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer/streaming"
+ "k8s.io/kubernetes/pkg/util/flowcontrol"
+ "k8s.io/kubernetes/pkg/util/net"
+ "k8s.io/kubernetes/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/watch"
+ "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+var (
+ // specialParams lists parameters that are handled specially and which users of Request
+ // are therefore not allowed to set manually.
+ specialParams = sets.NewString("timeout")
+
+	// longThrottleLatency defines the threshold for logging requests. All requests being
+	// throttled for more than longThrottleLatency will be logged.
+ longThrottleLatency = 50 * time.Millisecond
+)
+
+func init() {
+ metrics.Register()
+}
+
+// HTTPClient is an interface for testing a request object.
+type HTTPClient interface {
+ Do(req *http.Request) (*http.Response, error)
+}
+
+// ResponseWrapper is an interface for getting a response.
+// The response may be accessed either as raw data (the whole output is read into memory) or as a stream.
+type ResponseWrapper interface {
+ DoRaw() ([]byte, error)
+ Stream() (io.ReadCloser, error)
+}
+
+// RequestConstructionError is returned when there's an error assembling a request.
+type RequestConstructionError struct {
+ Err error
+}
+
+// Error returns a textual description of 'r'.
+func (r *RequestConstructionError) Error() string {
+ return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+ // required
+ client HTTPClient
+ verb string
+
+ baseURL *url.URL
+ content ContentConfig
+ serializers Serializers
+
+ // generic components accessible via method setters
+ pathPrefix string
+ subpath string
+ params url.Values
+ headers http.Header
+
+ // structural elements of the request that are part of the Kubernetes API conventions
+ namespace string
+ namespaceSet bool
+ resource string
+ resourceName string
+ subresource string
+ selector labels.Selector
+ timeout time.Duration
+
+ // output
+ err error
+ body io.Reader
+
+ // The constructed request and the response
+ req *http.Request
+ resp *http.Response
+
+ backoffMgr BackoffManager
+ throttle flowcontrol.RateLimiter
+}
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request {
+ if backoff == nil {
+ glog.V(2).Infof("Not implementing request backoff strategy.")
+ backoff = &NoBackoff{}
+ }
+
+ pathPrefix := "/"
+ if baseURL != nil {
+ pathPrefix = path.Join(pathPrefix, baseURL.Path)
+ }
+ r := &Request{
+ client: client,
+ verb: verb,
+ baseURL: baseURL,
+ pathPrefix: path.Join(pathPrefix, versionedAPIPath),
+ content: content,
+ serializers: serializers,
+ backoffMgr: backoff,
+ throttle: throttle,
+ }
+ if len(content.ContentType) > 0 {
+ r.SetHeader("Accept", content.ContentType+", */*")
+ }
+ return r
+}
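+
+// Example (illustrative sketch, assuming a *Request named req obtained from a
+// configured RESTClient): errors accumulate on the Request, so a chained call
+// only needs one check at the end:
+//
+//	pod := &v1.Pod{}
+//	err := req.
+//		Namespace("kube-system").
+//		Resource("pods").
+//		Name("kube-dns").
+//		Do().
+//		Into(pod)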
+
+// Prefix adds segments to the relative beginning of the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+ return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.subpath = path.Join(r.subpath, path.Join(segments...))
+ return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if len(r.resource) != 0 {
+ r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+ return r
+ }
+ if msgs := validation.IsValidPathSegmentName(resource); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+ return r
+ }
+ r.resource = resource
+ return r
+}
+
+// SubResource sets a sub-resource path that can consist of multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ subresource := path.Join(subresources...)
+ if len(r.subresource) != 0 {
+		r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.subresource, subresource)
+ return r
+ }
+ for _, s := range subresources {
+ if msgs := validation.IsValidPathSegmentName(s); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+ return r
+ }
+ }
+ r.subresource = subresource
+ return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if len(resourceName) == 0 {
+ r.err = fmt.Errorf("resource name may not be empty")
+ return r
+ }
+ if len(r.resourceName) != 0 {
+ r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+ return r
+ }
+ if msgs := validation.IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+ return r
+ }
+ r.resourceName = resourceName
+ return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if r.namespaceSet {
+ r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+ return r
+ }
+ if msgs := validation.IsValidPathSegmentName(namespace); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+ return r
+ }
+ r.namespaceSet = true
+ r.namespace = namespace
+ return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+ if scoped {
+ return r.Namespace(namespace)
+ }
+ return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+ if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+ // preserve any trailing slashes for legacy behavior
+ r.pathPrefix += "/"
+ }
+ return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI. Some parameters (those in specialParams) cannot be overwritten.
+func (r *Request) RequestURI(uri string) *Request {
+ if r.err != nil {
+ return r
+ }
+ locator, err := url.Parse(uri)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ r.pathPrefix = locator.Path
+ if len(locator.Query()) > 0 {
+ if r.params == nil {
+ r.params = make(url.Values)
+ }
+ for k, v := range locator.Query() {
+ r.params[k] = v
+ }
+ }
+ return r
+}
+
+const (
+ // A constant that clients can use to refer in a field selector to the object name field.
+ // Will be automatically emitted as the correct name for the API version.
+ nodeUnschedulable = "spec.unschedulable"
+ objectNameField = "metadata.name"
+ podHost = "spec.nodeName"
+ podStatus = "status.phase"
+ secretType = "type"
+
+ eventReason = "reason"
+ eventSource = "source"
+ eventType = "type"
+ eventInvolvedKind = "involvedObject.kind"
+ eventInvolvedNamespace = "involvedObject.namespace"
+ eventInvolvedName = "involvedObject.name"
+ eventInvolvedUID = "involvedObject.uid"
+ eventInvolvedAPIVersion = "involvedObject.apiVersion"
+ eventInvolvedResourceVersion = "involvedObject.resourceVersion"
+ eventInvolvedFieldPath = "involvedObject.fieldPath"
+)
+
+type clientFieldNameToAPIVersionFieldName map[string]string
+
+func (c clientFieldNameToAPIVersionFieldName) filterField(field, value string) (newField, newValue string, err error) {
+ newFieldName, ok := c[field]
+ if !ok {
+ return "", "", fmt.Errorf("%v - %v - no field mapping defined", field, value)
+ }
+ return newFieldName, value, nil
+}
+
+type resourceTypeToFieldMapping map[string]clientFieldNameToAPIVersionFieldName
+
+func (r resourceTypeToFieldMapping) filterField(resourceType, field, value string) (newField, newValue string, err error) {
+ fMapping, ok := r[resourceType]
+ if !ok {
+ return "", "", fmt.Errorf("%v - %v - %v - no field mapping defined", resourceType, field, value)
+ }
+ return fMapping.filterField(field, value)
+}
+
+type versionToResourceToFieldMapping map[unversioned.GroupVersion]resourceTypeToFieldMapping
+
+func (v versionToResourceToFieldMapping) filterField(groupVersion *unversioned.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) {
+ rMapping, ok := v[*groupVersion]
+ if !ok {
+ glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", groupVersion, resourceType, field, value)
+ return field, value, nil
+ }
+ newField, newValue, err = rMapping.filterField(resourceType, field, value)
+ if err != nil {
+ // This is only a warning until we find and fix all of the client's usages.
+ glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", groupVersion, resourceType, field, value)
+ return field, value, nil
+ }
+ return newField, newValue, nil
+}
+
+var fieldMappings = versionToResourceToFieldMapping{
+ v1.SchemeGroupVersion: resourceTypeToFieldMapping{
+ "nodes": clientFieldNameToAPIVersionFieldName{
+ objectNameField: objectNameField,
+ nodeUnschedulable: nodeUnschedulable,
+ },
+ "pods": clientFieldNameToAPIVersionFieldName{
+ podHost: podHost,
+ podStatus: podStatus,
+ },
+ "secrets": clientFieldNameToAPIVersionFieldName{
+ secretType: secretType,
+ },
+ "serviceAccounts": clientFieldNameToAPIVersionFieldName{
+ objectNameField: objectNameField,
+ },
+ "endpoints": clientFieldNameToAPIVersionFieldName{
+ objectNameField: objectNameField,
+ },
+ "events": clientFieldNameToAPIVersionFieldName{
+ objectNameField: objectNameField,
+ eventReason: eventReason,
+ eventSource: eventSource,
+ eventType: eventType,
+ eventInvolvedKind: eventInvolvedKind,
+ eventInvolvedNamespace: eventInvolvedNamespace,
+ eventInvolvedName: eventInvolvedName,
+ eventInvolvedUID: eventInvolvedUID,
+ eventInvolvedAPIVersion: eventInvolvedAPIVersion,
+ eventInvolvedResourceVersion: eventInvolvedResourceVersion,
+ eventInvolvedFieldPath: eventInvolvedFieldPath,
+ },
+ },
+}
+
+// FieldsSelectorParam adds the given field selector as a query parameter to the request.
+func (r *Request) FieldsSelectorParam(s fields.Selector) *Request {
+ if r.err != nil {
+ return r
+ }
+ if s == nil {
+ return r
+ }
+ if s.Empty() {
+ return r
+ }
+ s2, err := s.Transform(func(field, value string) (newField, newValue string, err error) {
+ return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value)
+ })
+ if err != nil {
+ r.err = err
+ return r
+ }
+ return r.setParam(unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String())
+}
+
+// LabelsSelectorParam adds the given label selector as a query parameter to the request.
+func (r *Request) LabelsSelectorParam(s labels.Selector) *Request {
+ if r.err != nil {
+ return r
+ }
+ if s == nil {
+ return r
+ }
+ if s.Empty() {
+ return r
+ }
+ return r.setParam(unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String())
+}
+
+// UintParam creates a query parameter with the given value.
+func (r *Request) UintParam(paramName string, u uint64) *Request {
+ if r.err != nil {
+ return r
+ }
+ return r.setParam(paramName, strconv.FormatUint(u, 10))
+}
+
+// Param creates a query parameter with the given string value.
+func (r *Request) Param(paramName, s string) *Request {
+ if r.err != nil {
+ return r
+ }
+ return r.setParam(paramName, s)
+}
+
+// VersionedParams will take the provided object, serialize it to a map[string][]string using the
+// implicit RESTClient API version and the default parameter codec, and then add those as parameters
+// to the request. Use this to provide versioned query parameters from client libraries.
+func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
+ if r.err != nil {
+ return r
+ }
+ params, err := codec.EncodeParameters(obj, *r.content.GroupVersion)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ for k, v := range params {
+ for _, value := range v {
+ // TODO: Move it to setParam method, once we get rid of
+ // FieldSelectorParam & LabelSelectorParam methods.
+ if k == unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" {
+ // Don't set an empty selector for backward compatibility.
+ // Since there is no way to get the difference between empty
+ // and unspecified string, we don't set it to avoid having
+ // labelSelector= param in every request.
+ continue
+ }
+ if k == unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()) {
+ if len(value) == 0 {
+ // Don't set an empty selector for backward compatibility.
+ // Since there is no way to get the difference between empty
+ // and unspecified string, we don't set it to avoid having
+ // fieldSelector= param in every request.
+ continue
+ }
+ // TODO: Filtering should be handled somewhere else.
+ selector, err := fields.ParseSelector(value)
+ if err != nil {
+ r.err = fmt.Errorf("unparsable field selector: %v", err)
+ return r
+ }
+ filteredSelector, err := selector.Transform(
+ func(field, value string) (newField, newValue string, err error) {
+ return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value)
+ })
+ if err != nil {
+ r.err = fmt.Errorf("untransformable field selector: %v", err)
+ return r
+ }
+ value = filteredSelector.String()
+ }
+
+ r.setParam(k, value)
+ }
+ }
+ return r
+}
+
+func (r *Request) setParam(paramName, value string) *Request {
+ if specialParams.Has(paramName) {
+ r.err = fmt.Errorf("must set %v through the corresponding function, not directly.", paramName)
+ return r
+ }
+ if r.params == nil {
+ r.params = make(url.Values)
+ }
+ r.params[paramName] = append(r.params[paramName], value)
+ return r
+}
+
+func (r *Request) SetHeader(key, value string) *Request {
+ if r.headers == nil {
+ r.headers = http.Header{}
+ }
+ r.headers.Set(key, value)
+ return r
+}
+
+// Timeout makes the request use the given duration as a timeout. Sets the "timeout"
+// parameter.
+func (r *Request) Timeout(d time.Duration) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.timeout = d
+ return r
+}
+
+// Body makes the request use obj as the body. Optional.
+// If obj is a string, try to read a file of that name.
+// If obj is a []byte, send it directly.
+// If obj is an io.Reader, use it directly.
+// If obj is a runtime.Object, marshal it correctly, and set Content-Type header.
+// If obj is a runtime.Object and nil, do nothing.
+// Otherwise, set an error.
+func (r *Request) Body(obj interface{}) *Request {
+ if r.err != nil {
+ return r
+ }
+ switch t := obj.(type) {
+ case string:
+ data, err := ioutil.ReadFile(t)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ glog.V(8).Infof("Request Body: %s", string(data))
+ r.body = bytes.NewReader(data)
+ case []byte:
+ glog.V(8).Infof("Request Body: %s", string(t))
+ r.body = bytes.NewReader(t)
+ case io.Reader:
+ r.body = t
+ case runtime.Object:
+ // callers may pass typed interface pointers, therefore we must check nil with reflection
+ if reflect.ValueOf(t).IsNil() {
+ return r
+ }
+ data, err := runtime.Encode(r.serializers.Encoder, t)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ glog.V(8).Infof("Request Body: %s", string(data))
+ r.body = bytes.NewReader(data)
+ r.SetHeader("Content-Type", r.content.ContentType)
+ default:
+ r.err = fmt.Errorf("unknown type used for body: %+v", obj)
+ }
+ return r
+}
+
+// URL returns the current working URL.
+func (r *Request) URL() *url.URL {
+ p := r.pathPrefix
+ if r.namespaceSet && len(r.namespace) > 0 {
+ p = path.Join(p, "namespaces", r.namespace)
+ }
+ if len(r.resource) != 0 {
+ p = path.Join(p, strings.ToLower(r.resource))
+ }
+ // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed
+ if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 {
+ p = path.Join(p, r.resourceName, r.subresource, r.subpath)
+ }
+
+ finalURL := &url.URL{}
+ if r.baseURL != nil {
+ *finalURL = *r.baseURL
+ }
+ finalURL.Path = p
+
+ query := url.Values{}
+ for key, values := range r.params {
+ for _, value := range values {
+ query.Add(key, value)
+ }
+ }
+
+ // timeout is handled specially here.
+ if r.timeout != 0 {
+ query.Set("timeout", r.timeout.String())
+ }
+ finalURL.RawQuery = query.Encode()
+ return finalURL
+}
+
+// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
+// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
+// parameters will be reset. This creates a copy of the request so as not to change the
+// underlying object. This means some useful request info (like the types of field
+// selectors in use) will be lost.
+// TODO: preserve field selector keys
+func (r Request) finalURLTemplate() string {
+ if len(r.resourceName) != 0 {
+ r.resourceName = "{name}"
+ }
+ if r.namespaceSet && len(r.namespace) != 0 {
+ r.namespace = "{namespace}"
+ }
+ newParams := url.Values{}
+ v := []string{"{value}"}
+ for k := range r.params {
+ newParams[k] = v
+ }
+ r.params = newParams
+ return r.URL().String()
+}
+
+func (r *Request) tryThrottle() {
+ now := time.Now()
+ if r.throttle != nil {
+ r.throttle.Accept()
+ }
+ if latency := time.Since(now); latency > longThrottleLatency {
+ glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+ }
+}
+
+// Watch attempts to begin watching the requested location.
+// Returns a watch.Interface, or an error.
+func (r *Request) Watch() (watch.Interface, error) {
+ // We specifically don't want to rate limit watches, so we
+ // don't use r.throttle here.
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.serializers.Framer == nil {
+ return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
+ }
+
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, r.body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = r.headers
+ client := r.client
+ if client == nil {
+ client = http.DefaultClient
+ }
+ r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+ resp, err := client.Do(req)
+ updateURLMetrics(r, resp, err)
+ if r.baseURL != nil {
+ if err != nil {
+ r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
+ } else {
+ r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
+ }
+ }
+ if err != nil {
+ // The watch stream mechanism handles many common partial data errors, so closed
+ // connections can be retried in many cases.
+ if net.IsProbableEOF(err) {
+ return watch.NewEmptyWatch(), nil
+ }
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ defer resp.Body.Close()
+ if result := r.transformResponse(resp, req); result.err != nil {
+ return nil, result.err
+ }
+ return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
+ }
+ framer := r.serializers.Framer.NewFrameReader(resp.Body)
+ decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
+ return watch.NewStreamWatcher(versioned.NewDecoder(decoder, r.serializers.Decoder)), nil
+}
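+
+// Example (illustrative sketch): consuming the watch.Interface returned above;
+// the result channel is closed when the server ends the watch or the stream
+// breaks irrecoverably:
+//
+//	w, err := req.Watch()
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for event := range w.ResultChan() {
+//		glog.V(4).Infof("watch event: %s %T", event.Type, event.Object)
+//	}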
+
+// updateURLMetrics is a convenience function for pushing metrics.
+// It also handles corner cases for incomplete/invalid request data.
+func updateURLMetrics(req *Request, resp *http.Response, err error) {
+ url := "none"
+ if req.baseURL != nil {
+ url = req.baseURL.Host
+ }
+
+ // If we have an error (i.e. apiserver down) we report that as a metric label.
+ if err != nil {
+ metrics.RequestResult.WithLabelValues(err.Error(), req.verb, url).Inc()
+ } else {
+ //Metrics for failure codes
+ metrics.RequestResult.WithLabelValues(strconv.Itoa(resp.StatusCode), req.verb, url).Inc()
+ }
+}
+
+// Stream formats and executes the request, and offers streaming of the response.
+// Returns an io.ReadCloser that can be used to stream the response, or an error.
+// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
+// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
+func (r *Request) Stream() (io.ReadCloser, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+
+ r.tryThrottle()
+
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = r.headers
+ client := r.client
+ if client == nil {
+ client = http.DefaultClient
+ }
+ r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+ resp, err := client.Do(req)
+ updateURLMetrics(r, resp, err)
+ if r.baseURL != nil {
+ if err != nil {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+ } else {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ switch {
+ case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
+ return resp.Body, nil
+
+ default:
+ // ensure we close the body before returning the error
+ defer resp.Body.Close()
+
+ // we have a decent shot at taking the object returned, parsing it as a status object and returning a more normal error
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("%v while accessing %v", resp.Status, url)
+ }
+
+ // TODO: Check ContentType.
+ if runtimeObject, err := runtime.Decode(r.serializers.Decoder, bodyBytes); err == nil {
+ statusError := errors.FromObject(runtimeObject)
+
+ if _, ok := statusError.(errors.APIStatus); ok {
+ return nil, statusError
+ }
+ }
+
+ bodyText := string(bodyBytes)
+ return nil, fmt.Errorf("%s while accessing %v: %s", resp.Status, url, bodyText)
+ }
+}
+
+// request connects to the server and invokes the provided function when a server response is
+// received. It handles retry behavior and up front validation of requests. It will invoke
+// fn at most once. It will return an error if a problem occurred prior to connecting to the
+// server - the provided function is responsible for handling server errors.
+func (r *Request) request(fn func(*http.Request, *http.Response)) error {
+ //Metrics for total request latency
+ start := time.Now()
+ defer func() {
+ metrics.RequestLatency.WithLabelValues(r.verb, r.finalURLTemplate()).Observe(metrics.SinceInMicroseconds(start))
+ }()
+
+ if r.err != nil {
+ glog.V(4).Infof("Error in request: %v", r.err)
+ return r.err
+ }
+
+ // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace)
+ if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 {
+ return fmt.Errorf("an empty namespace may not be set when a resource name is provided")
+ }
+ if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 {
+ return fmt.Errorf("an empty namespace may not be set during creation")
+ }
+
+ client := r.client
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Right now we make about ten retry attempts if we get a Retry-After response.
+ // TODO: Change to a timeout based approach.
+ maxRetries := 10
+ retries := 0
+ for {
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, r.body)
+ if err != nil {
+ return err
+ }
+ req.Header = r.headers
+
+ r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+ resp, err := client.Do(req)
+ updateURLMetrics(r, resp, err)
+ if err != nil {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+ } else {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+ }
+ if err != nil {
+ return err
+ }
+
+ done := func() bool {
+ // ensure the response body is closed before we reconnect, so that we reuse the same
+ // TCP connection
+ defer resp.Body.Close()
+
+ retries++
+ if seconds, wait := checkWait(resp); wait && retries < maxRetries {
+ if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
+ _, err := seeker.Seek(0, 0)
+ if err != nil {
+ glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
+ fn(req, resp)
+ return true
+ }
+ }
+
+ glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url)
+ r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
+ return false
+ }
+ fn(req, resp)
+ return true
+ }()
+ if done {
+ return nil
+ }
+ }
+}
+
+// Do formats and executes the request. Returns a Result object for easy response
+// processing.
+//
+// Error type:
+// * If the request can't be constructed, or an error happened earlier while building its
+// arguments: *RequestConstructionError
+// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
+// * http.Client.Do errors are returned directly.
+func (r *Request) Do() Result {
+ r.tryThrottle()
+
+ var result Result
+ err := r.request(func(req *http.Request, resp *http.Response) {
+ result = r.transformResponse(resp, req)
+ })
+ if err != nil {
+ return Result{err: err}
+ }
+ return result
+}
+
+// DoRaw executes the request but does not process the response body.
+func (r *Request) DoRaw() ([]byte, error) {
+ r.tryThrottle()
+
+ var result Result
+ err := r.request(func(req *http.Request, resp *http.Response) {
+ result.body, result.err = ioutil.ReadAll(resp.Body)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result.body, result.err
+}
+
+// transformResponse converts an API response into a structured API object
+func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
+ var body []byte
+ if resp.Body != nil {
+ if data, err := ioutil.ReadAll(resp.Body); err == nil {
+ body = data
+ }
+ }
+ glog.V(8).Infof("Response Body: %s", string(body))
+
+ // Did the server give us a status response?
+ isStatusResponse := false
+ // Because release-1.1 server returns Status with empty APIVersion at paths
+ // to the Extensions resources, we need to use DecodeInto here to provide
+ // default groupVersion, otherwise a status response won't be correctly
+ // decoded.
+ status := &unversioned.Status{}
+ err := runtime.DecodeInto(r.serializers.Decoder, body, status)
+ if err == nil && len(status.Status) > 0 {
+ isStatusResponse = true
+ }
+
+ switch {
+ case resp.StatusCode == http.StatusSwitchingProtocols:
+ // no-op, we've been upgraded
+ case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+ if !isStatusResponse {
+ return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
+ }
+ return Result{err: errors.FromObject(status)}
+ }
+
+ // If the server gave us a status back, look at what it was.
+ success := resp.StatusCode >= http.StatusOK && resp.StatusCode <= http.StatusPartialContent
+ if isStatusResponse && (status.Status != unversioned.StatusSuccess && !success) {
+ // "Failed" requests are clearly just an error and it makes sense to return them as such.
+ return Result{err: errors.FromObject(status)}
+ }
+
+ contentType := resp.Header.Get("Content-Type")
+ var decoder runtime.Decoder
+ if contentType == r.content.ContentType {
+ decoder = r.serializers.Decoder
+ } else {
+ mediaType, params, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return Result{err: errors.NewInternalError(err)}
+ }
+ decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
+ if err != nil {
+ return Result{
+ body: body,
+ contentType: contentType,
+ statusCode: resp.StatusCode,
+ }
+ }
+ }
+
+ return Result{
+ body: body,
+ contentType: contentType,
+ statusCode: resp.StatusCode,
+ decoder: decoder,
+ }
+}
+
+// transformUnstructuredResponseError handles an error from the server that is not in a structured form.
+// It is expected to transform any response that is not recognizable as a clear server sent error from the
+// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries
+// introduce a level of uncertainty to the responses returned by servers that in common use result in
+// unexpected responses. The rough structure is:
+//
+// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
+// - this is the happy path
+// - when you get this output, trust what the server sends
+// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
+// generate a reasonable facsimile of the original failure.
+// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
+// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
+// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
+// initial contact, the presence of mismatched body contents from posted content types
+// - Give these a separate distinct error type and capture as much as possible of the original message
+//
+// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
+func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
+ if body == nil && resp.Body != nil {
+ if data, err := ioutil.ReadAll(resp.Body); err == nil {
+ body = data
+ }
+ }
+ glog.V(8).Infof("Response Body: %s", string(body))
+
+ message := "unknown"
+ if isTextResponse(resp) {
+ message = strings.TrimSpace(string(body))
+ }
+ retryAfter, _ := retryAfterSeconds(resp)
+ return errors.NewGenericServerResponse(
+ resp.StatusCode,
+ req.Method,
+ unversioned.GroupResource{
+ Group: r.content.GroupVersion.Group,
+ Resource: r.resource,
+ },
+ r.resourceName,
+ message,
+ retryAfter,
+ true,
+ )
+}
+
+// isTextResponse returns true if the response appears to be a textual media type.
+func isTextResponse(resp *http.Response) bool {
+ contentType := resp.Header.Get("Content-Type")
+ if len(contentType) == 0 {
+ return true
+ }
+ media, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return false
+ }
+ return strings.HasPrefix(media, "text/")
+}
+
+// checkWait returns true along with a number of seconds if the server instructed us to wait
+// before retrying.
+func checkWait(resp *http.Response) (int, bool) {
+ switch r := resp.StatusCode; {
+ // any 500 error code and 429 can trigger a wait
+ case r == errors.StatusTooManyRequests, r >= 500:
+ default:
+ return 0, false
+ }
+ i, ok := retryAfterSeconds(resp)
+ return i, ok
+}
+
+// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
+// the header was missing or not a valid number.
+func retryAfterSeconds(resp *http.Response) (int, bool) {
+ if h := resp.Header.Get("Retry-After"); len(h) > 0 {
+ if i, err := strconv.Atoi(h); err == nil {
+ return i, true
+ }
+ }
+ return 0, false
+}
+
+// Result contains the result of calling Request.Do().
+type Result struct {
+ body []byte
+ contentType string
+ err error
+ statusCode int
+
+ decoder runtime.Decoder
+}
+
+// Raw returns the raw result.
+func (r Result) Raw() ([]byte, error) {
+ return r.body, r.err
+}
+
+// Get returns the result as an object.
+func (r Result) Get() (runtime.Object, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.decoder == nil {
+ return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+ }
+ return runtime.Decode(r.decoder, r.body)
+}
+
+// StatusCode returns the HTTP status code of the request. (Only valid if no
+// error was returned.)
+func (r Result) StatusCode(statusCode *int) Result {
+ *statusCode = r.statusCode
+ return r
+}
+
+// Into stores the result into obj, if possible. If obj is nil it is ignored.
+func (r Result) Into(obj runtime.Object) error {
+ if r.err != nil {
+ return r.err
+ }
+ if r.decoder == nil {
+ return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+ }
+ return runtime.DecodeInto(r.decoder, r.body, obj)
+}
+
+// WasCreated updates the provided bool pointer to whether the server returned
+// 201 created or a different response.
+func (r Result) WasCreated(wasCreated *bool) Result {
+ *wasCreated = r.statusCode == http.StatusCreated
+ return r
+}
+
+// Error returns the error executing the request, nil if no error occurred.
+// See the Request.Do() comment for what errors you might get.
+func (r Result) Error() error {
+ return r.err
+}
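+
+// Example (illustrative sketch): inspecting a Result without decoding it,
+// using the accessors above:
+//
+//	var statusCode int
+//	body, err := req.Do().StatusCode(&statusCode).Raw()
+//	if err != nil {
+//		glog.Errorf("request failed (HTTP %d): %v", statusCode, err)
+//	} else {
+//		glog.V(4).Infof("HTTP %d, %d bytes", statusCode, len(body))
+//	}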
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go
new file mode 100644
index 0000000..c385914
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "crypto/tls"
+ "net/http"
+
+ "k8s.io/kubernetes/pkg/client/transport"
+)
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(config *Config) (*tls.Config, error) {
+ cfg, err := config.transportConfig()
+ if err != nil {
+ return nil, err
+ }
+ return transport.TLSConfigFor(cfg)
+}
+
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return the
+// default http.DefaultTransport if no special case behavior is needed.
+func TransportFor(config *Config) (http.RoundTripper, error) {
+ cfg, err := config.transportConfig()
+ if err != nil {
+ return nil, err
+ }
+ return transport.New(cfg)
+}
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the
+// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack
+// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use
+// the higher level TransportFor or RESTClientFor methods.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+ cfg, err := config.transportConfig()
+ if err != nil {
+ return nil, err
+ }
+ return transport.HTTPWrappersForConfig(cfg, rt)
+}
+
+// transportConfig converts a client config to an appropriate transport config.
+func (c *Config) transportConfig() (*transport.Config, error) {
+ wt := c.WrapTransport
+ if c.AuthProvider != nil {
+ provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
+ if err != nil {
+ return nil, err
+ }
+ if wt != nil {
+ previousWT := wt
+ wt = func(rt http.RoundTripper) http.RoundTripper {
+ return provider.WrapTransport(previousWT(rt))
+ }
+ } else {
+ wt = provider.WrapTransport
+ }
+ }
+ return &transport.Config{
+ UserAgent: c.UserAgent,
+ Transport: c.Transport,
+ WrapTransport: wt,
+ TLS: transport.TLSConfig{
+ CAFile: c.CAFile,
+ CAData: c.CAData,
+ CertFile: c.CertFile,
+ CertData: c.CertData,
+ KeyFile: c.KeyFile,
+ KeyData: c.KeyData,
+ Insecure: c.Insecure,
+ },
+ Username: c.Username,
+ Password: c.Password,
+ BearerToken: c.BearerToken,
+ Impersonate: c.Impersonate,
+ }, nil
+}
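+
+// Example (illustrative sketch, cfg assumed to be a populated *Config):
+// turning a restclient Config into an http.Client that carries the configured
+// TLS and authentication behavior:
+//
+//	rt, err := TransportFor(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	httpClient := &http.Client{Transport: rt}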
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go
new file mode 100644
index 0000000..81f16d6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "fmt"
+ "net/url"
+ "path"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func DefaultServerURL(host, apiPath string, groupVersion unversioned.GroupVersion, defaultTLS bool) (*url.URL, string, error) {
+ if host == "" {
+ return nil, "", fmt.Errorf("host must be a URL or a host:port pair")
+ }
+ base := host
+ hostURL, err := url.Parse(base)
+ if err != nil {
+ return nil, "", err
+ }
+ if hostURL.Scheme == "" || hostURL.Host == "" {
+ scheme := "http://"
+ if defaultTLS {
+ scheme = "https://"
+ }
+ hostURL, err = url.Parse(scheme + base)
+ if err != nil {
+ return nil, "", err
+ }
+ if hostURL.Path != "" && hostURL.Path != "/" {
+ return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base)
+ }
+ }
+
+ // hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to
+ // all URIs used to access the host. this is useful when there's a proxy in front of the
+ // apiserver that has relocated the apiserver endpoints, forwarding all requests from, for
+ // example, /a/b/c to the apiserver. in this case the Path should be /a/b/c.
+ //
+ // if running without a frontend proxy (that changes the location of the apiserver), then
+ // hostURL.Path should be blank.
+ //
+ // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base
+ versionedAPIPath := path.Join("/", apiPath)
+
+ // Add the version to the end of the path
+ if len(groupVersion.Group) > 0 {
+ versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version)
+
+ } else {
+ versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version)
+
+ }
+
+ return hostURL, versionedAPIPath, nil
+}
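+
+// Example (illustrative): for a bare host:port and the legacy API path,
+// DefaultServerURL defaults the scheme and appends the version:
+//
+//	base, apiPath, err := DefaultServerURL("localhost:8080", "/api",
+//		unversioned.GroupVersion{Version: "v1"}, false)
+//	// base.String() == "http://localhost:8080", apiPath == "/api/v1"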
+
+// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerUrlFor(config *Config) (*url.URL, string, error) {
+ // TODO: move the default to secure when the apiserver supports TLS by default
+ // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+ hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+ hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+ defaultTLS := hasCA || hasCert || config.Insecure
+ host := config.Host
+ if host == "" {
+ host = "localhost"
+ }
+
+ if config.GroupVersion != nil {
+ return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS)
+ }
+ return DefaultServerURL(host, config.APIPath, unversioned.GroupVersion{}, defaultTLS)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go
new file mode 100644
index 0000000..24a89ed
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "net/url"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/util/flowcontrol"
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+// Set of response codes that we back off for.
+// In general these should be errors that indicate a server is overloaded.
+// These shouldn't be configured by any user; we set them by convention.
+var serverIsOverloadedSet = sets.NewInt(429)
+var maxResponseCode = 499
+
+type BackoffManager interface {
+ UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
+ CalculateBackoff(actualUrl *url.URL) time.Duration
+ Sleep(d time.Duration)
+}
+
+// URLBackoff implements, on top of Backoff, the semantics we need for
+// URL-specific exponential backoff.
+type URLBackoff struct {
+ // Uses backoff as underlying implementation.
+ Backoff *flowcontrol.Backoff
+}
+
+// NoBackoff is a stub implementation; it can be used for mocking or as a default.
+type NoBackoff struct {
+}
+
+func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+ // do nothing.
+}
+
+func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+ return 0 * time.Second
+}
+
+func (n *NoBackoff) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
+// by tests which want to run 1000s of mock requests without slowing down.
+func (b *URLBackoff) Disable() {
+ glog.V(4).Infof("Disabling backoff strategy")
+ b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
+}
+
+// baseUrlKey returns the key to which URLs will be mapped.
+// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080.
+func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
+ // Simple implementation for now, just the host.
+ // We may backoff specific paths (i.e. "pods") differentially
+ // in the future.
+ host, err := url.Parse(rawurl.String())
+ if err != nil {
+ glog.V(4).Infof("Error extracting url: %v", rawurl)
+ panic("bad url!")
+ }
+ return host.Host
+}
+
+// UpdateBackoff updates backoff metadata
+func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+ // range for retry counts that we store is [0,13]
+ if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
+ b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
+ return
+ } else if responseCode >= 300 || err != nil {
+ glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
+ }
+
+ //If we got this far, there is no backoff required for this URL anymore.
+ b.Backoff.Reset(b.baseUrlKey(actualUrl))
+}
+
+// CalculateBackoff takes a URL and backs off exponentially,
+// based on its knowledge of existing failures.
+func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+ return b.Backoff.Get(b.baseUrlKey(actualUrl))
+}
+
+func (b *URLBackoff) Sleep(d time.Duration) {
+ b.Backoff.Clock.Sleep(d)
+}
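+
+// Example (illustrative sketch; u is an assumed *url.URL): a URLBackoff over a
+// flowcontrol.Backoff, updated with the outcome of each request and consulted
+// before the next one:
+//
+//	b := &URLBackoff{Backoff: flowcontrol.NewBackOff(time.Second, 30*time.Second)}
+//	b.UpdateBackoff(u, nil, 429)   // server overloaded: grow the delay for this host
+//	b.Sleep(b.CalculateBackoff(u)) // wait before the next attempt
+//	b.UpdateBackoff(u, nil, 200)   // success: reset the delay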
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go
new file mode 100644
index 0000000..3376434
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restclient
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+const (
+ legacyAPIPath = "/api"
+ defaultAPIPath = "/apis"
+)
+
+// TODO: Is this obsoleted by the discovery client?
+
+// ServerAPIVersions returns the GroupVersions supported by the API server.
+// It creates a RESTClient based on the passed in config, but it doesn't rely
+// on the Version and Codec of the config, because it uses AbsPath and
+// takes the raw response.
+func ServerAPIVersions(c *Config) (groupVersions []string, err error) {
+ transport, err := TransportFor(c)
+ if err != nil {
+ return nil, err
+ }
+ client := http.Client{Transport: transport}
+
+ configCopy := *c
+ configCopy.GroupVersion = nil
+ configCopy.APIPath = ""
+ baseURL, _, err := defaultServerUrlFor(&configCopy)
+ if err != nil {
+ return nil, err
+ }
+ // Get the groupVersions exposed at /api
+ originalPath := baseURL.Path
+ baseURL.Path = path.Join(originalPath, legacyAPIPath)
+ resp, err := client.Get(baseURL.String())
+ if err != nil {
+ return nil, err
+ }
+ var v unversioned.APIVersions
+ defer resp.Body.Close()
+ err = json.NewDecoder(resp.Body).Decode(&v)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error: %v", err)
+ }
+
+ groupVersions = append(groupVersions, v.Versions...)
+ // Get the groupVersions exposed at /apis
+ baseURL.Path = path.Join(originalPath, defaultAPIPath)
+ resp2, err := client.Get(baseURL.String())
+ if err != nil {
+ return nil, err
+ }
+ var apiGroupList unversioned.APIGroupList
+ defer resp2.Body.Close()
+ err = json.NewDecoder(resp2.Body).Decode(&apiGroupList)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error: %v", err)
+ }
+
+ for _, g := range apiGroupList.Groups {
+ for _, gv := range g.Versions {
+ groupVersions = append(groupVersions, gv.GroupVersion)
+ }
+ }
+
+ return groupVersions, nil
+}
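+
+// Example (illustrative sketch, cfg assumed to be a populated *Config):
+// listing every group/version the apiserver advertises under /api and /apis:
+//
+//	groupVersions, err := ServerAPIVersions(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	for _, gv := range groupVersions {
+//		fmt.Printf("server supports %s\n", gv)
+//	}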
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go
new file mode 100644
index 0000000..eedfd3d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ utilnet "k8s.io/kubernetes/pkg/util/net"
+)
+
+// tlsTransportCache caches TLS http.RoundTrippers for different configurations. The
+// same RoundTripper will be returned for configs with identical TLS options. If
+// the config has no custom TLS options, http.DefaultTransport is returned.
+type tlsTransportCache struct {
+ mu sync.Mutex
+ transports map[string]*http.Transport
+}
+
+const idleConnsPerHost = 25
+
+var tlsCache = &tlsTransportCache{transports: make(map[string]*http.Transport)}
+
+func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
+ key, err := tlsConfigKey(config)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure we only create a single transport for the given TLS options
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // See if we already have a custom transport for this config
+ if t, ok := c.transports[key]; ok {
+ return t, nil
+ }
+
+ // Get the TLS options for this client config
+ tlsConfig, err := TLSConfigFor(config)
+ if err != nil {
+ return nil, err
+ }
+ // The options didn't require a custom TLS config
+ if tlsConfig == nil {
+ return http.DefaultTransport, nil
+ }
+
+ // Cache a single transport for these options
+ c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: tlsConfig,
+ MaxIdleConnsPerHost: idleConnsPerHost,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ })
+ return c.transports[key], nil
+}
+
+// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
+func tlsConfigKey(c *Config) (string, error) {
+ // Make sure ca/key/cert content is loaded
+ if err := loadTLSFiles(c); err != nil {
+ return "", err
+ }
+ // Only include the things that actually affect the tls.Config
+ return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/config.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/config.go
new file mode 100644
index 0000000..6e5c68a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/config.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import "net/http"
+
+// Config holds various options for establishing a transport.
+type Config struct {
+ // UserAgent is an optional field that specifies the caller of this
+ // request.
+ UserAgent string
+
+ // The base TLS configuration for this transport.
+ TLS TLSConfig
+
+ // Username and password for basic authentication
+ Username string
+ Password string
+
+ // Bearer token for authentication
+ BearerToken string
+
+ // Impersonate is the username that this Config will impersonate
+ Impersonate string
+
+ // Transport may be used for custom HTTP behavior. This attribute may
+ // not be specified with the TLS client certificate options. Use
+ // WrapTransport for most client level operations.
+ Transport http.RoundTripper
+
+ // WrapTransport will be invoked for custom HTTP behavior after the
+ // underlying transport is initialized (either the transport created
+ // from TLSClientConfig, Transport, or http.DefaultTransport). The
+ // config may layer other RoundTrippers on top of the returned
+ // RoundTripper.
+ WrapTransport func(rt http.RoundTripper) http.RoundTripper
+}
+
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *Config) HasCA() bool {
+ return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0
+}
+
+// HasBasicAuth returns whether the configuration has basic authentication or not.
+func (c *Config) HasBasicAuth() bool {
+ return len(c.Username) != 0
+}
+
+// HasTokenAuth returns whether the configuration has token authentication or not.
+func (c *Config) HasTokenAuth() bool {
+ return len(c.BearerToken) != 0
+}
+
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *Config) HasCertAuth() bool {
+ return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0
+}
+
+// TLSConfig holds the information needed to set up a TLS transport.
+type TLSConfig struct {
+ CAFile string // Path of the PEM-encoded server trusted root certificates.
+ CertFile string // Path of the PEM-encoded client certificate.
+ KeyFile string // Path of the PEM-encoded client key.
+
+ Insecure bool // Server should be accessed without verifying the certificate. For testing only.
+
+	CAData   []byte // Bytes of the PEM-encoded server trusted root certificates. Supersedes CAFile.
+	CertData []byte // Bytes of the PEM-encoded client certificate. Supersedes CertFile.
+	KeyData  []byte // Bytes of the PEM-encoded client key. Supersedes KeyFile.
+}
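+
+// Example (illustrative sketch; token is an assumed service account token
+// string): a Config carrying bearer-token authentication and a CA bundle,
+// roughly as the restclient layer above builds it for in-cluster use:
+//
+//	cfg := &Config{
+//		UserAgent:   "kube2msb",
+//		BearerToken: token,
+//		TLS:         TLSConfig{CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"},
+//	}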
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go
new file mode 100644
index 0000000..aadf0cb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go
@@ -0,0 +1,337 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/golang/glog"
+)
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered
+// behavior from the config. It is exposed so that clients which need HTTP-like
+// behavior but must hijack the underlying connection (such as WebSocket or
+// HTTP2 clients) can reuse this logic. Pure HTTP clients should use the RoundTripper returned from
+// New.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+ if config.WrapTransport != nil {
+ rt = config.WrapTransport(rt)
+ }
+
+ rt = DebugWrappers(rt)
+
+ // Set authentication wrappers
+ switch {
+ case config.HasBasicAuth() && config.HasTokenAuth():
+ return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
+ case config.HasTokenAuth():
+ rt = NewBearerAuthRoundTripper(config.BearerToken, rt)
+ case config.HasBasicAuth():
+ rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt)
+ }
+ if len(config.UserAgent) > 0 {
+ rt = NewUserAgentRoundTripper(config.UserAgent, rt)
+ }
+ if len(config.Impersonate) > 0 {
+ rt = NewImpersonatingRoundTripper(config.Impersonate, rt)
+ }
+ return rt, nil
+}
+
+// DebugWrappers wraps a round tripper and logs based on the current log level.
+func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
+ switch {
+ case bool(glog.V(9)):
+ rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders)
+ case bool(glog.V(8)):
+ rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders)
+ case bool(glog.V(7)):
+ rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus)
+ case bool(glog.V(6)):
+ rt = newDebuggingRoundTripper(rt, debugURLTiming)
+ }
+
+ return rt
+}
+
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+type userAgentRoundTripper struct {
+ agent string
+ rt http.RoundTripper
+}
+
+func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper {
+ return &userAgentRoundTripper{agent, rt}
+}
+
+func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ if len(req.Header.Get("User-Agent")) != 0 {
+ return rt.rt.RoundTrip(req)
+ }
+ req = cloneRequest(req)
+ req.Header.Set("User-Agent", rt.agent)
+ return rt.rt.RoundTrip(req)
+}
+
+func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) {
+ if canceler, ok := rt.rt.(requestCanceler); ok {
+ canceler.CancelRequest(req)
+ } else {
+ glog.Errorf("CancelRequest not implemented")
+ }
+}
+
+func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+type basicAuthRoundTripper struct {
+ username string
+ password string
+ rt http.RoundTripper
+}
+
+// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a
+// request unless it has already been set.
+func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper {
+ return &basicAuthRoundTripper{username, password, rt}
+}
+
+func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ if len(req.Header.Get("Authorization")) != 0 {
+ return rt.rt.RoundTrip(req)
+ }
+ req = cloneRequest(req)
+ req.SetBasicAuth(rt.username, rt.password)
+ return rt.rt.RoundTrip(req)
+}
+
+func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
+ if canceler, ok := rt.rt.(requestCanceler); ok {
+ canceler.CancelRequest(req)
+ } else {
+ glog.Errorf("CancelRequest not implemented")
+ }
+}
+
+func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+type impersonatingRoundTripper struct {
+ impersonate string
+ delegate http.RoundTripper
+}
+
+// NewImpersonatingRoundTripper will add an Impersonate-User header to a request unless it has already been set.
+func NewImpersonatingRoundTripper(impersonate string, delegate http.RoundTripper) http.RoundTripper {
+ return &impersonatingRoundTripper{impersonate, delegate}
+}
+
+func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ if len(req.Header.Get("Impersonate-User")) != 0 {
+ return rt.delegate.RoundTrip(req)
+ }
+ req = cloneRequest(req)
+ req.Header.Set("Impersonate-User", rt.impersonate)
+ return rt.delegate.RoundTrip(req)
+}
+
+func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) {
+ if canceler, ok := rt.delegate.(requestCanceler); ok {
+ canceler.CancelRequest(req)
+ } else {
+ glog.Errorf("CancelRequest not implemented")
+ }
+}
+
+func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate }
+
+type bearerAuthRoundTripper struct {
+ bearer string
+ rt http.RoundTripper
+}
+
+// NewBearerAuthRoundTripper adds the provided bearer token to a request
+// unless the authorization header has already been set.
+func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper {
+ return &bearerAuthRoundTripper{bearer, rt}
+}
+
+func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ if len(req.Header.Get("Authorization")) != 0 {
+ return rt.rt.RoundTrip(req)
+ }
+
+ req = cloneRequest(req)
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer))
+ return rt.rt.RoundTrip(req)
+}
+
+func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) {
+ if canceler, ok := rt.rt.(requestCanceler); ok {
+ canceler.CancelRequest(req)
+ } else {
+ glog.Errorf("CancelRequest not implemented")
+ }
+}
+
+func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return r2
+}
+
+// requestInfo keeps track of information about a request/response combination
+type requestInfo struct {
+ RequestHeaders http.Header
+ RequestVerb string
+ RequestURL string
+
+ ResponseStatus string
+ ResponseHeaders http.Header
+ ResponseErr error
+
+ Duration time.Duration
+}
+
+// newRequestInfo creates a new RequestInfo based on an http request
+func newRequestInfo(req *http.Request) *requestInfo {
+ return &requestInfo{
+ RequestURL: req.URL.String(),
+ RequestVerb: req.Method,
+ RequestHeaders: req.Header,
+ }
+}
+
+// complete adds information about the response to the requestInfo
+func (r *requestInfo) complete(response *http.Response, err error) {
+ if err != nil {
+ r.ResponseErr = err
+ return
+ }
+ r.ResponseStatus = response.Status
+ r.ResponseHeaders = response.Header
+}
+
+// toCurl returns a string that can be run as a command in a terminal (minus the body)
+func (r *requestInfo) toCurl() string {
+ headers := ""
+ for key, values := range r.RequestHeaders {
+ for _, value := range values {
+ headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value))
+ }
+ }
+
+ return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL)
+}
+
+// debuggingRoundTripper will display information about the requests passing
+// through it based on what is configured
+type debuggingRoundTripper struct {
+ delegatedRoundTripper http.RoundTripper
+
+ levels map[debugLevel]bool
+}
+
+type debugLevel int
+
+const (
+ debugJustURL debugLevel = iota
+ debugURLTiming
+ debugCurlCommand
+ debugRequestHeaders
+ debugResponseStatus
+ debugResponseHeaders
+)
+
+func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper {
+ drt := &debuggingRoundTripper{
+ delegatedRoundTripper: rt,
+ levels: make(map[debugLevel]bool, len(levels)),
+ }
+ for _, v := range levels {
+ drt.levels[v] = true
+ }
+ return drt
+}
+
+func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) {
+ if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok {
+ canceler.CancelRequest(req)
+ } else {
+ glog.Errorf("CancelRequest not implemented")
+ }
+}
+
+func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqInfo := newRequestInfo(req)
+
+ if rt.levels[debugJustURL] {
+ glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
+ }
+ if rt.levels[debugCurlCommand] {
+ glog.Infof("%s", reqInfo.toCurl())
+ }
+ if rt.levels[debugRequestHeaders] {
+ glog.Infof("Request Headers:")
+ for key, values := range reqInfo.RequestHeaders {
+ for _, value := range values {
+ glog.Infof(" %s: %s", key, value)
+ }
+ }
+ }
+
+ startTime := time.Now()
+ response, err := rt.delegatedRoundTripper.RoundTrip(req)
+ reqInfo.Duration = time.Since(startTime)
+
+ reqInfo.complete(response, err)
+
+ if rt.levels[debugURLTiming] {
+ glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+ }
+ if rt.levels[debugResponseStatus] {
+ glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+ }
+ if rt.levels[debugResponseHeaders] {
+ glog.Infof("Response Headers:")
+ for key, values := range reqInfo.ResponseHeaders {
+ for _, value := range values {
+ glog.Infof(" %s: %s", key, value)
+ }
+ }
+ }
+
+ return response, err
+}
+
+func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
+ return rt.delegatedRoundTripper
+}
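The wrappers above compose in a fixed order: WrapTransport (if set), the debug wrapper, exactly one of bearer or basic auth, then the User-Agent and impersonation layers. A minimal sketch of that composition over http.DefaultTransport, assuming the vendored import path shown in this diff; the token and agent values are purely illustrative.

package example

import (
	"net/http"

	"k8s.io/kubernetes/pkg/client/transport"
)

// buildAuthenticatedTransport layers the round trippers defined above around a
// base transport; the credential values are hypothetical.
func buildAuthenticatedTransport() (http.RoundTripper, error) {
	cfg := &transport.Config{
		UserAgent:   "kube2msb/example", // hypothetical agent string
		BearerToken: "example-token",    // hypothetical token; also setting basic auth would be rejected
	}
	// Wraps http.DefaultTransport with the bearer-auth and user-agent round trippers.
	return transport.HTTPWrappersForConfig(cfg, http.DefaultTransport)
}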
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go
new file mode 100644
index 0000000..9c5b9ef
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+// New returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config.
+func New(config *Config) (http.RoundTripper, error) {
+ // Set transport level security
+ if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) {
+ return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed")
+ }
+
+ var (
+ rt http.RoundTripper
+ err error
+ )
+
+ if config.Transport != nil {
+ rt = config.Transport
+ } else {
+ rt, err = tlsCache.get(config)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return HTTPWrappersForConfig(config, rt)
+}
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(c *Config) (*tls.Config, error) {
+ if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) {
+ return nil, nil
+ }
+ if c.HasCA() && c.TLS.Insecure {
+ return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+ }
+ if err := loadTLSFiles(c); err != nil {
+ return nil, err
+ }
+
+ tlsConfig := &tls.Config{
+ // Can't use SSLv3 because of POODLE and BEAST
+ // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+ // Can't use TLSv1.1 because of RC4 cipher usage
+ MinVersion: tls.VersionTLS12,
+ InsecureSkipVerify: c.TLS.Insecure,
+ }
+
+ if c.HasCA() {
+ tlsConfig.RootCAs = rootCertPool(c.TLS.CAData)
+ }
+
+ if c.HasCertAuth() {
+ cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ return tlsConfig, nil
+}
+
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *Config) error {
+ var err error
+ c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile)
+ if err != nil {
+ return err
+ }
+
+ c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile)
+ if err != nil {
+ return err
+ }
+
+ c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+ if len(data) > 0 {
+ return data, nil
+ }
+ if len(file) > 0 {
+ fileData, err := ioutil.ReadFile(file)
+ if err != nil {
+ return []byte{}, err
+ }
+ return fileData, nil
+ }
+ return nil, nil
+}
+
+// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+ // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
+ // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+ // It doesn't allow trusting either/or, but hopefully that won't be an issue
+ if len(caData) == 0 {
+ return nil
+ }
+
+ // if we have caData, use it
+ certPool := x509.NewCertPool()
+ certPool.AppendCertsFromPEM(caData)
+ return certPool
+}
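For callers that also need transport-level security, transport.New builds the TLS-backed transport from the Config before applying the same HTTP wrappers. A hedged sketch; the file paths are placeholders and error handling is left to the caller.

package example

import (
	"net/http"

	"k8s.io/kubernetes/pkg/client/transport"
)

// newSecureRoundTripper builds a TLS transport from PEM files on disk.
func newSecureRoundTripper() (http.RoundTripper, error) {
	cfg := &transport.Config{
		TLS: transport.TLSConfig{
			CAFile:   "/etc/kube2msb/ca.crt", // placeholder paths
			CertFile: "/etc/kube2msb/client.crt",
			KeyFile:  "/etc/kube2msb/client.key",
		},
	}
	// New rejects a custom Transport combined with TLS options, loads the PEM
	// files via loadTLSFiles, and enforces a TLS 1.2 minimum before wrapping.
	return transport.New(cfg)
}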
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go
new file mode 100644
index 0000000..1bd18b9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go
@@ -0,0 +1,317 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/emicklei/go-restful/swagger"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer"
+ utilerrors "k8s.io/kubernetes/pkg/util/errors"
+ "k8s.io/kubernetes/pkg/version"
+)
+
+// DiscoveryInterface holds the methods that discover server-supported API groups,
+// versions and resources.
+type DiscoveryInterface interface {
+ ServerGroupsInterface
+ ServerResourcesInterface
+ ServerVersionInterface
+ SwaggerSchemaInterface
+}
+
+// ServerGroupsInterface has methods for obtaining supported groups on the API server
+type ServerGroupsInterface interface {
+ // ServerGroups returns the supported groups, with information like supported versions and the
+ // preferred version.
+ ServerGroups() (*unversioned.APIGroupList, error)
+}
+
+// ServerResourcesInterface has methods for obtaining supported resources on the API server
+type ServerResourcesInterface interface {
+ // ServerResourcesForGroupVersion returns the supported resources for a group and version.
+ ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error)
+ // ServerResources returns the supported resources for all groups and versions.
+ ServerResources() (map[string]*unversioned.APIResourceList, error)
+ // ServerPreferredResources returns the supported resources with the version preferred by the
+ // server.
+ ServerPreferredResources() ([]unversioned.GroupVersionResource, error)
+ // ServerPreferredNamespacedResources returns the supported namespaced resources with the
+ // version preferred by the server.
+ ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error)
+}
+
+// ServerVersionInterface has a method for retrieving the server's version.
+type ServerVersionInterface interface {
+ // ServerVersion retrieves and parses the server's version (git version).
+ ServerVersion() (*version.Info, error)
+}
+
+// SwaggerSchemaInterface has a method to retrieve the swagger schema.
+type SwaggerSchemaInterface interface {
+ // SwaggerSchema retrieves and parses the swagger API schema the server supports.
+ SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error)
+}
+
+// DiscoveryClient implements the functions that discover server-supported API groups,
+// versions and resources.
+type DiscoveryClient struct {
+ *restclient.RESTClient
+
+ LegacyPrefix string
+}
+
+// Convert unversioned.APIVersions to unversioned.APIGroup. APIVersions is used by legacy v1, so
+// group would be "".
+func apiVersionsToAPIGroup(apiVersions *unversioned.APIVersions) (apiGroup unversioned.APIGroup) {
+ groupVersions := []unversioned.GroupVersionForDiscovery{}
+ for _, version := range apiVersions.Versions {
+ groupVersion := unversioned.GroupVersionForDiscovery{
+ GroupVersion: version,
+ Version: version,
+ }
+ groupVersions = append(groupVersions, groupVersion)
+ }
+ apiGroup.Versions = groupVersions
+ // There should be only one groupVersion returned at /api
+ apiGroup.PreferredVersion = groupVersions[0]
+ return
+}
+
+// ServerGroups returns the supported groups, with information like supported versions and the
+// preferred version.
+func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) {
+ // Get the groupVersions exposed at /api
+ v := &unversioned.APIVersions{}
+ err = d.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
+ apiGroup := unversioned.APIGroup{}
+ if err == nil {
+ apiGroup = apiVersionsToAPIGroup(v)
+ }
+ if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
+ return nil, err
+ }
+
+ // Get the groupVersions exposed at /apis
+ apiGroupList = &unversioned.APIGroupList{}
+ err = d.Get().AbsPath("/apis").Do().Into(apiGroupList)
+ if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
+ return nil, err
+ }
+ // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
+ if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
+ apiGroupList = &unversioned.APIGroupList{}
+ }
+
+ // append the group retrieved from /api to the list
+ apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
+ return apiGroupList, nil
+}
+
+// ServerResourcesForGroupVersion returns the supported resources for a group and version.
+func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *unversioned.APIResourceList, err error) {
+ url := url.URL{}
+ if len(groupVersion) == 0 {
+ return nil, fmt.Errorf("groupVersion shouldn't be empty")
+ }
+ if len(d.LegacyPrefix) > 0 && groupVersion == "v1" {
+ url.Path = d.LegacyPrefix + "/" + groupVersion
+ } else {
+ url.Path = "/apis/" + groupVersion
+ }
+ resources = &unversioned.APIResourceList{}
+ err = d.Get().AbsPath(url.String()).Do().Into(resources)
+ if err != nil {
+ // ignore 403 or 404 errors to stay compatible with a v1.0 server.
+ if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
+ return resources, nil
+ } else {
+ return nil, err
+ }
+ }
+ return resources, nil
+}
+
+// ServerResources returns the supported resources for all groups and versions.
+func (d *DiscoveryClient) ServerResources() (map[string]*unversioned.APIResourceList, error) {
+ apiGroups, err := d.ServerGroups()
+ if err != nil {
+ return nil, err
+ }
+ groupVersions := unversioned.ExtractGroupVersions(apiGroups)
+ result := map[string]*unversioned.APIResourceList{}
+ for _, groupVersion := range groupVersions {
+ resources, err := d.ServerResourcesForGroupVersion(groupVersion)
+ if err != nil {
+ return nil, err
+ }
+ result[groupVersion] = resources
+ }
+ return result, nil
+}
+
+// serverPreferredResources returns the supported resources with the version preferred by the
+// server. If namespaced is true, only namespaced resources will be returned.
+func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversioned.GroupVersionResource, error) {
+ results := []unversioned.GroupVersionResource{}
+ serverGroupList, err := d.ServerGroups()
+ if err != nil {
+ return results, err
+ }
+
+ allErrs := []error{}
+ for _, apiGroup := range serverGroupList.Groups {
+ preferredVersion := apiGroup.PreferredVersion
+ apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion)
+ if err != nil {
+ allErrs = append(allErrs, err)
+ continue
+ }
+ groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version}
+ for _, apiResource := range apiResourceList.APIResources {
+ // ignore the root scoped resources if "namespaced" is true.
+ if namespaced && !apiResource.Namespaced {
+ continue
+ }
+ if strings.Contains(apiResource.Name, "/") {
+ continue
+ }
+ results = append(results, groupVersion.WithResource(apiResource.Name))
+ }
+ }
+ return results, utilerrors.NewAggregate(allErrs)
+}
+
+// ServerPreferredResources returns the supported resources with the version preferred by the
+// server.
+func (d *DiscoveryClient) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) {
+ return d.serverPreferredResources(false)
+}
+
+// ServerPreferredNamespacedResources returns the supported namespaced resources with the
+// version preferred by the server.
+func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) {
+ return d.serverPreferredResources(true)
+}
+
+// ServerVersion retrieves and parses the server's version (git version).
+func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
+ body, err := d.Get().AbsPath("/version").Do().Raw()
+ if err != nil {
+ return nil, err
+ }
+ var info version.Info
+ err = json.Unmarshal(body, &info)
+ if err != nil {
+ return nil, fmt.Errorf("got '%s': %v", string(body), err)
+ }
+ return &info, nil
+}
+
+// SwaggerSchema retrieves and parses the swagger API schema the server supports.
+func (d *DiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) {
+ if version.IsEmpty() {
+ return nil, fmt.Errorf("groupVersion cannot be empty")
+ }
+
+ groupList, err := d.ServerGroups()
+ if err != nil {
+ return nil, err
+ }
+ groupVersions := unversioned.ExtractGroupVersions(groupList)
+ // This check also takes care the case that kubectl is newer than the running endpoint
+ if stringDoesntExistIn(version.String(), groupVersions) {
+ return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions)
+ }
+ var path string
+ if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion {
+ path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version
+ } else {
+ path = "/swaggerapi/apis/" + version.Group + "/" + version.Version
+ }
+
+ body, err := d.Get().AbsPath(path).Do().Raw()
+ if err != nil {
+ return nil, err
+ }
+ var schema swagger.ApiDeclaration
+ err = json.Unmarshal(body, &schema)
+ if err != nil {
+ return nil, fmt.Errorf("got '%s': %v", string(body), err)
+ }
+ return &schema, nil
+}
+
+func setDiscoveryDefaults(config *restclient.Config) error {
+ config.APIPath = ""
+ config.GroupVersion = nil
+ codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()}
+ config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(
+ runtime.SerializerInfo{Serializer: codec},
+ runtime.StreamSerializerInfo{},
+ )
+ if len(config.UserAgent) == 0 {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ return nil
+}
+
+// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client
+// can be used to discover supported resources in the API server.
+func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
+ config := *c
+ if err := setDiscoveryDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.UnversionedRESTClientFor(&config)
+ return &DiscoveryClient{RESTClient: client, LegacyPrefix: "/api"}, err
+}
+
+// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config and
+// panics if there is an error in the config.
+func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient {
+ client, err := NewDiscoveryClientForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+
+}
+
+// New creates a new DiscoveryClient for the given RESTClient.
+func NewDiscoveryClient(c *restclient.RESTClient) *DiscoveryClient {
+ return &DiscoveryClient{RESTClient: c, LegacyPrefix: "/api"}
+}
+
+func stringDoesntExistIn(str string, slice []string) bool {
+ for _, s := range slice {
+ if s == str {
+ return false
+ }
+ }
+ return true
+}
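The DiscoveryClient is typically built straight from a restclient.Config and then asked what the API server actually serves. A sketch under the assumption that the host value is illustrative and that the vendored restclient.Config exposes a Host field as used elsewhere in this tree.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
)

// printServerInfo queries the discovery endpoints of an API server.
func printServerInfo() error {
	cfg := &restclient.Config{Host: "https://127.0.0.1:6443"} // illustrative endpoint
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return err
	}
	// ServerVersion hits /version; ServerGroups merges /api (legacy) with /apis.
	info, err := dc.ServerVersion()
	if err != nil {
		return err
	}
	groups, err := dc.ServerGroups()
	if err != nil {
		return err
	}
	fmt.Printf("server %s exposes %d API groups\n", info.GitVersion, len(groups.Groups))
	return nil
}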
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go
new file mode 100644
index 0000000..f2498cb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/apps"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+type AppsInterface interface {
+ PetSetNamespacer
+}
+
+// AppsClient is used to interact with features in the Kubernetes apps API group.
+type AppsClient struct {
+ *restclient.RESTClient
+}
+
+func (c *AppsClient) PetSets(namespace string) PetSetInterface {
+ return newPetSet(c, namespace)
+}
+
+func NewApps(c *restclient.Config) (*AppsClient, error) {
+ config := *c
+ if err := setAppsDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &AppsClient{client}, nil
+}
+
+func NewAppsOrDie(c *restclient.Config) *AppsClient {
+ client, err := NewApps(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setAppsDefaults(config *restclient.Config) error {
+ g, err := registered.Group(apps.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go
new file mode 100644
index 0000000..128597f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package auth defines a file format for holding authentication
+information needed by clients of Kubernetes. Typically,
+a Kubernetes cluster will put auth info for the admin in a known
+location when it is created, and will (soon) put it in a known
+location within a Container's file tree for Containers that
+need access to the Kubernetes API.
+
+Having a defined format allows:
+ - clients to be implemented in multiple languages
+ - applications which link clients to be portable across
+ clusters with different authentication styles (e.g.
+ some may use SSL Client certs, others may not, etc)
+ - when the format changes, applications only
+ need to update this code.
+
+The file format is json, marshalled from a struct authcfg.Info.
+
+Client libraries in other languages should use the same format.
+
+It is not intended to store general preferences, such as default
+namespace, output options, etc. CLIs (such as kubectl) and UIs should
+develop their own format and may wish to inline the authcfg.Info type.
+
+The authcfg.Info is just a file format. It is distinct from
+client.Config which holds options for creating a client.Client.
+Helper functions are provided in this package to fill in a
+client.Client from an authcfg.Info.
+
+Example:
+
+ import (
+ "pkg/client"
+ "pkg/client/auth"
+ )
+
+ info, err := auth.LoadFromFile(filename)
+ if err != nil {
+ // handle error
+ }
+ clientConfig = client.Config{}
+ clientConfig.Host = "example.com:4901"
+ clientConfig = info.MergeWithConfig()
+ client := client.New(clientConfig)
+ client.Pods(ns).List()
+*/
+package auth
+
+// TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated.
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+// Info holds Kubernetes API authorization config. It is intended
+// to be read/written from a file as a JSON object.
+type Info struct {
+ User string
+ Password string
+ CAFile string
+ CertFile string
+ KeyFile string
+ BearerToken string
+ Insecure *bool
+}
+
+// LoadFromFile parses an Info object from a file path.
+// If the file does not exist, then os.IsNotExist(err) == true
+func LoadFromFile(path string) (*Info, error) {
+ var info Info
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return nil, err
+ }
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &info)
+ if err != nil {
+ return nil, err
+ }
+ return &info, err
+}
+
+// MergeWithConfig returns a copy of a client.Config with values from the Info.
+// The fields of client.Config with a corresponding field in the Info are set
+// with the value from the Info.
+func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) {
+ var config restclient.Config = c
+ config.Username = info.User
+ config.Password = info.Password
+ config.CAFile = info.CAFile
+ config.CertFile = info.CertFile
+ config.KeyFile = info.KeyFile
+ config.BearerToken = info.BearerToken
+ if info.Insecure != nil {
+ config.Insecure = *info.Insecure
+ }
+ return config, nil
+}
+
+func (info Info) Complete() bool {
+ return len(info.User) > 0 ||
+ len(info.CertFile) > 0 ||
+ len(info.BearerToken) > 0
+}
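The example in the package comment above predates the restclient split. Against the vendored API in this file, loading an Info file and folding it into a restclient.Config looks roughly like the sketch below; the path and host are placeholders supplied by the caller.

package example

import (
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned/auth"
)

// configFromAuthFile loads an auth Info file and merges it into a client config.
func configFromAuthFile(path, host string) (restclient.Config, error) {
	info, err := auth.LoadFromFile(path) // os.IsNotExist(err) is true when the file is absent
	if err != nil {
		return restclient.Config{}, err
	}
	cfg := restclient.Config{Host: host}
	// MergeWithConfig copies user, password, token, TLS file paths and the
	// insecure flag (when set) onto the config.
	return info.MergeWithConfig(cfg)
}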
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go
new file mode 100644
index 0000000..188a5ea
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+type AutoscalingInterface interface {
+ HorizontalPodAutoscalersNamespacer
+}
+
+// AutoscalingClient is used to interact with Kubernetes autoscaling features.
+type AutoscalingClient struct {
+ *restclient.RESTClient
+}
+
+func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface {
+ return newHorizontalPodAutoscalers(c, namespace)
+}
+
+func NewAutoscaling(c *restclient.Config) (*AutoscalingClient, error) {
+ config := *c
+ if err := setAutoscalingDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &AutoscalingClient{client}, nil
+}
+
+func NewAutoscalingOrDie(c *restclient.Config) *AutoscalingClient {
+ client, err := NewAutoscaling(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setAutoscalingDefaults(config *restclient.Config) error {
+ // if autoscaling group is not registered, return an error
+ g, err := registered.Group(autoscaling.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go
new file mode 100644
index 0000000..c31652f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+type BatchInterface interface {
+ JobsNamespacer
+ ScheduledJobsNamespacer
+}
+
+// BatchClient is used to interact with Kubernetes batch features.
+type BatchClient struct {
+ *restclient.RESTClient
+}
+
+func (c *BatchClient) Jobs(namespace string) JobInterface {
+ return newJobsV1(c, namespace)
+}
+
+func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface {
+ return newScheduledJobs(c, namespace)
+}
+
+func NewBatch(c *restclient.Config) (*BatchClient, error) {
+ config := *c
+ if err := setBatchDefaults(&config, nil); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &BatchClient{client}, nil
+}
+
+func NewBatchV2Alpha1(c *restclient.Config) (*BatchClient, error) {
+ config := *c
+ if err := setBatchDefaults(&config, &v2alpha1.SchemeGroupVersion); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &BatchClient{client}, nil
+}
+
+func NewBatchOrDie(c *restclient.Config) *BatchClient {
+ var (
+ client *BatchClient
+ err error
+ )
+ if c.ContentConfig.GroupVersion != nil && *c.ContentConfig.GroupVersion == v2alpha1.SchemeGroupVersion {
+ client, err = NewBatchV2Alpha1(c)
+ } else {
+ client, err = NewBatch(c)
+ }
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setBatchDefaults(config *restclient.Config, gv *unversioned.GroupVersion) error {
+ // if batch group is not registered, return an error
+ g, err := registered.Group(batch.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ if gv != nil {
+ copyGroupVersion = *gv
+ }
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ return nil
+}
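NewBatchOrDie picks the API version from ContentConfig.GroupVersion: only an explicit batch/v2alpha1 request routes through NewBatchV2Alpha1, otherwise the default registered batch version is used. The apps, autoscaling and certificates clients in this directory follow the same setXxxDefaults pattern. A sketch with an illustrative host, assuming the batch group has been registered.

package example

import (
	"k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// scheduledJobsClient requests batch/v2alpha1 explicitly so ScheduledJobs are served.
func scheduledJobsClient(host string) *unversioned.BatchClient {
	cfg := &restclient.Config{Host: host} // illustrative endpoint
	cfg.ContentConfig.GroupVersion = &v2alpha1.SchemeGroupVersion
	client := unversioned.NewBatchOrDie(cfg)
	_ = client.ScheduledJobs("default") // namespaced accessor defined above
	return client
}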
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificates.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificates.go
new file mode 100644
index 0000000..29b15c4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificates.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/certificates"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+// CertificatesInterface holds the methods for clients of the certificates API group to allow mock testing.
+type CertificatesInterface interface {
+ CertificateSigningRequests() CertificateSigningRequestInterface
+}
+
+type CertificatesClient struct {
+ *restclient.RESTClient
+}
+
+func (c *CertificatesClient) CertificateSigningRequests() CertificateSigningRequestInterface {
+ return newCertificateSigningRequests(c)
+}
+
+// NewCertificates creates a new CertificatesClient for the given config.
+func NewCertificates(c *restclient.Config) (*CertificatesClient, error) {
+ config := *c
+ if err := setCertificatesDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &CertificatesClient{client}, nil
+}
+
+// NewCertificatesOrDie creates a new CertificatesClient for the given config and
+// panics if there is an error in the config.
+func NewCertificatesOrDie(c *restclient.Config) *CertificatesClient {
+ client, err := NewCertificates(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setCertificatesDefaults(config *restclient.Config) error {
+ // if certificates group is not registered, return an error
+ g, err := registered.Group(certificates.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ if config.QPS == 0 {
+ config.QPS = 5
+ }
+ if config.Burst == 0 {
+ config.Burst = 10
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificatesigningrequests.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificatesigningrequests.go
new file mode 100644
index 0000000..f3ce09f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/certificatesigningrequests.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/certificates"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources.
+type CertificateSigningRequestInterface interface {
+ List(opts api.ListOptions) (*certificates.CertificateSigningRequestList, error)
+ Get(name string) (*certificates.CertificateSigningRequest, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(certificateSigningRequest *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error)
+ Update(certificateSigningRequest *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error)
+ UpdateStatus(certificateSigningRequest *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error)
+ UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// certificateSigningRequests implements the CertificateSigningRequestInterface
+type certificateSigningRequests struct {
+ client *CertificatesClient
+}
+
+// newCertificateSigningRequests returns a certificateSigningRequests
+func newCertificateSigningRequests(c *CertificatesClient) *certificateSigningRequests {
+ return &certificateSigningRequests{
+ client: c,
+ }
+}
+
+// List takes label and field selectors, and returns the list of certificateSigningRequests that match those selectors.
+func (c *certificateSigningRequests) List(opts api.ListOptions) (result *certificates.CertificateSigningRequestList, err error) {
+ result = &certificates.CertificateSigningRequestList{}
+ err = c.client.Get().Resource("certificatesigningrequests").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the certificateSigningRequest, and returns the corresponding CertificateSigningRequest object, and an error if it occurs
+func (c *certificateSigningRequests) Get(name string) (result *certificates.CertificateSigningRequest, err error) {
+ result = &certificates.CertificateSigningRequest{}
+ err = c.client.Get().Resource("certificatesigningrequests").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the certificateSigningRequest and deletes it. Returns an error if one occurs.
+func (c *certificateSigningRequests) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Resource("certificatesigningrequests").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if it occurs.
+func (c *certificateSigningRequests) Create(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+ result = &certificates.CertificateSigningRequest{}
+ err = c.client.Post().Resource("certificatesigningrequests").Body(certificateSigningRequest).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if it occurs.
+func (c *certificateSigningRequests) Update(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+ result = &certificates.CertificateSigningRequest{}
+ err = c.client.Put().Resource("certificatesigningrequests").Name(certificateSigningRequest.Name).Body(certificateSigningRequest).Do().Into(result)
+ return
+}
+
+// UpdateStatus takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if it occurs.
+func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+ result = &certificates.CertificateSigningRequest{}
+ err = c.client.Put().Resource("certificatesigningrequests").Name(certificateSigningRequest.Name).SubResource("status").Body(certificateSigningRequest).Do().Into(result)
+ return
+}
+
+// UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if it occurs.
+func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+ result = &certificates.CertificateSigningRequest{}
+ err = c.client.Put().Resource("certificatesigningrequests").Name(certificateSigningRequest.Name).SubResource("approval").Body(certificateSigningRequest).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested certificateSigningRequests.
+func (c *certificateSigningRequests) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(api.NamespaceAll).
+ Resource("certificatesigningrequests").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
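Putting the certificates client and the CSR accessor together, a typical read path is: build the client, list the requests, then fetch or approve individual ones by name. A sketch; the error handling and helper name are illustrative.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// csrNames lists certificate signing requests and returns their names.
func csrNames(cfg *restclient.Config) ([]string, error) {
	client, err := unversioned.NewCertificates(cfg)
	if err != nil {
		return nil, err
	}
	list, err := client.CertificateSigningRequests().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, csr := range list.Items {
		names = append(names, csr.Name)
	}
	return names, nil
}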
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go
new file mode 100644
index 0000000..5474e96
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go
@@ -0,0 +1,179 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "net"
+ "net/url"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/client/typed/discovery"
+)
+
+// Interface holds the methods for clients of Kubernetes,
+// an interface to allow mock testing.
+type Interface interface {
+ PodsNamespacer
+ PodTemplatesNamespacer
+ ReplicationControllersNamespacer
+ ServicesNamespacer
+ EndpointsNamespacer
+ NodesInterface
+ EventNamespacer
+ LimitRangesNamespacer
+ ResourceQuotasNamespacer
+ ServiceAccountsNamespacer
+ SecretsNamespacer
+ NamespacesInterface
+ PersistentVolumesInterface
+ PersistentVolumeClaimsNamespacer
+ ComponentStatusesInterface
+ ConfigMapsNamespacer
+ Autoscaling() AutoscalingInterface
+ Batch() BatchInterface
+ Extensions() ExtensionsInterface
+ Rbac() RbacInterface
+ Discovery() discovery.DiscoveryInterface
+ Certificates() CertificatesInterface
+}
+
+func (c *Client) ReplicationControllers(namespace string) ReplicationControllerInterface {
+ return newReplicationControllers(c, namespace)
+}
+
+func (c *Client) Nodes() NodeInterface {
+ return newNodes(c)
+}
+
+func (c *Client) Events(namespace string) EventInterface {
+ return newEvents(c, namespace)
+}
+
+func (c *Client) Endpoints(namespace string) EndpointsInterface {
+ return newEndpoints(c, namespace)
+}
+
+func (c *Client) Pods(namespace string) PodInterface {
+ return newPods(c, namespace)
+}
+
+func (c *Client) PodTemplates(namespace string) PodTemplateInterface {
+ return newPodTemplates(c, namespace)
+}
+
+func (c *Client) Services(namespace string) ServiceInterface {
+ return newServices(c, namespace)
+}
+func (c *Client) LimitRanges(namespace string) LimitRangeInterface {
+ return newLimitRanges(c, namespace)
+}
+
+func (c *Client) ResourceQuotas(namespace string) ResourceQuotaInterface {
+ return newResourceQuotas(c, namespace)
+}
+
+func (c *Client) ServiceAccounts(namespace string) ServiceAccountsInterface {
+ return newServiceAccounts(c, namespace)
+}
+
+func (c *Client) Secrets(namespace string) SecretsInterface {
+ return newSecrets(c, namespace)
+}
+
+func (c *Client) Namespaces() NamespaceInterface {
+ return newNamespaces(c)
+}
+
+func (c *Client) PersistentVolumes() PersistentVolumeInterface {
+ return newPersistentVolumes(c)
+}
+
+func (c *Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface {
+ return newPersistentVolumeClaims(c, namespace)
+}
+
+func (c *Client) ComponentStatuses() ComponentStatusInterface {
+ return newComponentStatuses(c)
+}
+
+func (c *Client) ConfigMaps(namespace string) ConfigMapsInterface {
+ return newConfigMaps(c, namespace)
+}
+
+// Client is the implementation of a Kubernetes client.
+type Client struct {
+ *restclient.RESTClient
+ *AutoscalingClient
+ *BatchClient
+ *ExtensionsClient
+ *AppsClient
+ *PolicyClient
+ *RbacClient
+ *discovery.DiscoveryClient
+ *CertificatesClient
+}
+
+// IsTimeout tests if this is a timeout error in the underlying transport.
+// This is unbelievably ugly.
+// See: http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error for details
+func IsTimeout(err error) bool {
+ if err == nil {
+ return false
+ }
+ switch err := err.(type) {
+ case *url.Error:
+ if err, ok := err.Err.(net.Error); ok {
+ return err.Timeout()
+ }
+ case net.Error:
+ return err.Timeout()
+ }
+
+ if strings.Contains(err.Error(), "use of closed network connection") {
+ return true
+ }
+ return false
+}
+
+func (c *Client) Autoscaling() AutoscalingInterface {
+ return c.AutoscalingClient
+}
+
+func (c *Client) Batch() BatchInterface {
+ return c.BatchClient
+}
+
+func (c *Client) Extensions() ExtensionsInterface {
+ return c.ExtensionsClient
+}
+
+func (c *Client) Apps() AppsInterface {
+ return c.AppsClient
+}
+
+func (c *Client) Rbac() RbacInterface {
+ return c.RbacClient
+}
+
+func (c *Client) Discovery() discovery.DiscoveryInterface {
+ return c.DiscoveryClient
+}
+
+func (c *Client) Certificates() CertificatesInterface {
+ return c.CertificatesClient
+}
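Given a fully assembled *unversioned.Client (its constructor lives in helper.go, outside this diff), core resources hang directly off the embedded RESTClient while grouped APIs go through the accessors shown above. A small sketch of how the pieces are reached:

package example

import (
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// touchAccessors shows how resources are reached on an assembled Client.
func touchAccessors(c *unversioned.Client) {
	_ = c.Pods("kube-system") // core group, served by the embedded RESTClient
	_ = c.Services("kube-system")
	_ = c.Batch()      // grouped APIs go through accessors
	_ = c.Extensions()
	_ = c.Discovery()  // discovery client shares the same configuration
}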
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go
new file mode 100644
index 0000000..43e2648
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+)
+
+func init() {
+ sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
+ redactedBytes = []byte(string(sDec))
+}
+
+// IsConfigEmpty returns true if the config is empty.
+func IsConfigEmpty(config *Config) bool {
+ return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 &&
+ len(config.CurrentContext) == 0 &&
+ len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors &&
+ len(config.Extensions) == 0
+}
+
+// MinifyConfig reads the current context and uses it to keep only the relevant pieces of the config.
+// This is useful for making secrets based on kubeconfig files.
+func MinifyConfig(config *Config) error {
+ if len(config.CurrentContext) == 0 {
+ return errors.New("current-context must exist in order to minify")
+ }
+
+ currContext, exists := config.Contexts[config.CurrentContext]
+ if !exists {
+ return fmt.Errorf("cannot locate context %v", config.CurrentContext)
+ }
+
+ newContexts := map[string]*Context{}
+ newContexts[config.CurrentContext] = currContext
+
+ newClusters := map[string]*Cluster{}
+ if len(currContext.Cluster) > 0 {
+ if _, exists := config.Clusters[currContext.Cluster]; !exists {
+ return fmt.Errorf("cannot locate cluster %v", currContext.Cluster)
+ }
+
+ newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster]
+ }
+
+ newAuthInfos := map[string]*AuthInfo{}
+ if len(currContext.AuthInfo) > 0 {
+ if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists {
+ return fmt.Errorf("cannot locate user %v", currContext.AuthInfo)
+ }
+
+ newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo]
+ }
+
+ config.AuthInfos = newAuthInfos
+ config.Clusters = newClusters
+ config.Contexts = newContexts
+
+ return nil
+}
+
+var redactedBytes []byte
+
+// ShortenConfig redacts raw data entries from the config object for a human-readable view.
+func ShortenConfig(config *Config) {
+ // trick json encoder into printing a human readable string in the raw data
+ // by base64 decoding what we want to print. Relies on implementation of
+ // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
+ for key, authInfo := range config.AuthInfos {
+ if len(authInfo.ClientKeyData) > 0 {
+ authInfo.ClientKeyData = redactedBytes
+ }
+ if len(authInfo.ClientCertificateData) > 0 {
+ authInfo.ClientCertificateData = redactedBytes
+ }
+ config.AuthInfos[key] = authInfo
+ }
+ for key, cluster := range config.Clusters {
+ if len(cluster.CertificateAuthorityData) > 0 {
+ cluster.CertificateAuthorityData = redactedBytes
+ }
+ config.Clusters[key] = cluster
+ }
+}
+
+// FlattenConfig changes the config object into a self-contained config (useful for making secrets).
+func FlattenConfig(config *Config) error {
+ for key, authInfo := range config.AuthInfos {
+ baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
+ if err != nil {
+ return err
+ }
+
+ if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil {
+ return err
+ }
+ if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil {
+ return err
+ }
+
+ config.AuthInfos[key] = authInfo
+ }
+ for key, cluster := range config.Clusters {
+ baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
+ if err != nil {
+ return err
+ }
+
+ if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil {
+ return err
+ }
+
+ config.Clusters[key] = cluster
+ }
+
+ return nil
+}
+
+func FlattenContent(path *string, contents *[]byte, baseDir string) error {
+ if len(*path) != 0 {
+ if len(*contents) > 0 {
+ return errors.New("cannot have values for both path and contents")
+ }
+
+ var err error
+ absPath := ResolvePath(*path, baseDir)
+ *contents, err = ioutil.ReadFile(absPath)
+ if err != nil {
+ return err
+ }
+
+ *path = ""
+ }
+
+ return nil
+}
+
+// ResolvePath returns the path as an absolute path, resolved relative to the given base directory
+func ResolvePath(path string, base string) string {
+ // Don't resolve empty paths
+ if len(path) > 0 {
+ // Don't resolve absolute paths
+ if !filepath.IsAbs(path) {
+ return filepath.Join(base, path)
+ }
+ }
+
+ return path
+}
+
+func MakeAbs(path, base string) (string, error) {
+ if filepath.IsAbs(path) {
+ return path, nil
+ }
+ if len(base) == 0 {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ base = cwd
+ }
+ return filepath.Join(base, path), nil
+}
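For orientation, a minimal usage sketch of the helpers above when preparing a kubeconfig for embedding in a secret. It assumes clientcmd.LoadFromFile from the parent clientcmd package and an illustrative file path; error handling is reduced to log.Fatal for brevity.

package main

import (
	"log"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
)

func main() {
	// Load a kubeconfig from disk (LoadFromFile is assumed; the path is illustrative).
	config, err := clientcmd.LoadFromFile("/tmp/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	// Keep only the pieces reachable from current-context.
	if err := clientcmdapi.MinifyConfig(config); err != nil {
		log.Fatal(err)
	}
	// Inline referenced certificate/key files so the config is self contained.
	if err := clientcmdapi.FlattenConfig(config); err != nil {
		log.Fatal(err)
	}
	// Redact raw cert/key data before printing the result anywhere.
	clientcmdapi.ShortenConfig(config)
	log.Printf("minified config keeps %d cluster(s)", len(config.Clusters))
}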
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go
new file mode 100644
index 0000000..0b9a427
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package latest
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+ _ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer/json"
+ "k8s.io/kubernetes/pkg/runtime/serializer/versioning"
+)
+
+// Version is the string that represents the current external default version.
+const Version = "v1"
+
+var ExternalVersion = unversioned.GroupVersion{Group: "", Version: "v1"}
+
+// OldestVersion is the string that represents the oldest server version supported,
+// for client code that wants to hardcode the lowest common denominator.
+const OldestVersion = "v1"
+
+// Versions is the list of versions that are recognized in code. The order provided
+// may be assumed to be least feature rich to most feature rich, and clients may
+// choose to prefer the latter items in the list over the former items when presented
+// with a set of versions to choose from.
+var Versions = []string{"v1"}
+
+var Codec runtime.Codec
+
+func init() {
+ yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme)
+ Codec = versioning.NewCodecForScheme(
+ api.Scheme,
+ yamlSerializer,
+ yamlSerializer,
+ []unversioned.GroupVersion{{Version: Version}},
+ []unversioned.GroupVersion{{Version: runtime.APIVersionInternal}},
+ )
+}
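A hedged sketch of how the Codec above can round-trip a kubeconfig between the internal api.Config form and its external v1 YAML form, assuming the generic runtime.Encode/runtime.Decode helpers from this vendored runtime package.

package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Encode an internal Config; the versioning codec converts it to v1
	// and serializes it as YAML.
	cfg := api.NewConfig()
	cfg.CurrentContext = "dev" // illustrative value
	data, err := runtime.Encode(latest.Codec, cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data)

	// Decoding goes the other way: YAML bytes back into the internal form.
	obj, err := runtime.Decode(latest.Codec, data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded current-context:", obj.(*api.Config).CurrentContext)
}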
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go
new file mode 100644
index 0000000..5426e7f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+var Scheme = runtime.NewScheme()
+
+// SchemeGroupVersion is group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
+
+func init() {
+ Scheme.AddKnownTypes(SchemeGroupVersion,
+ &Config{},
+ )
+}
+
+func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+ obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind {
+ return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go
new file mode 100644
index 0000000..95b5289
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go
@@ -0,0 +1,152 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to build connections to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type Config struct {
+ // Legacy field from pkg/api/types.go TypeMeta.
+ // TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+ Kind string `json:"kind,omitempty"`
+ // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc).
+ // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify
+ // a single value for the cluster version.
+ // This field isn't really needed anyway, so we are deprecating it without replacement.
+ // It will be ignored if it is present.
+ APIVersion string `json:"apiVersion,omitempty"`
+ // Preferences holds general information to be used for cli interactions
+ Preferences Preferences `json:"preferences"`
+ // Clusters is a map of referenceable names to cluster configs
+ Clusters map[string]*Cluster `json:"clusters"`
+ // AuthInfos is a map of referenceable names to user configs
+ AuthInfos map[string]*AuthInfo `json:"users"`
+ // Contexts is a map of referenceable names to context configs
+ Contexts map[string]*Context `json:"contexts"`
+ // CurrentContext is the name of the context that you would like to use by default
+ CurrentContext string `json:"current-context"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type Preferences struct {
+ Colors bool `json:"colors,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // Server is the address of the kubernetes cluster (https://hostname:port).
+ Server string `json:"server"`
+ // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc).
+ APIVersion string `json:"api-version,omitempty"`
+ // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+ // CertificateAuthority is the path to a cert file for the certificate authority.
+ CertificateAuthority string `json:"certificate-authority,omitempty"`
+ // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+ CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // ClientCertificate is the path to a client cert file for TLS.
+ ClientCertificate string `json:"client-certificate,omitempty"`
+ // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+ ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+ // ClientKey is the path to a client key file for TLS.
+ ClientKey string `json:"client-key,omitempty"`
+ // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+ ClientKeyData []byte `json:"client-key-data,omitempty"`
+ // Token is the bearer token for authentication to the kubernetes cluster.
+ Token string `json:"token,omitempty"`
+ // Impersonate is the username to act-as.
+ Impersonate string `json:"act-as,omitempty"`
+ // Username is the username for basic authentication to the kubernetes cluster.
+ Username string `json:"username,omitempty"`
+ // Password is the password for basic authentication to the kubernetes cluster.
+ Password string `json:"password,omitempty"`
+ // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+ AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // Cluster is the name of the cluster for this context
+ Cluster string `json:"cluster"`
+ // AuthInfo is the name of the authInfo for this context
+ AuthInfo string `json:"user"`
+ // Namespace is the default namespace to use on unspecified requests
+ Namespace string `json:"namespace,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+ Name string `json:"name"`
+ Config map[string]string `json:"config,omitempty"`
+}
+
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func NewConfig() *Config {
+ return &Config{
+ Preferences: *NewPreferences(),
+ Clusters: make(map[string]*Cluster),
+ AuthInfos: make(map[string]*AuthInfo),
+ Contexts: make(map[string]*Context),
+ Extensions: make(map[string]runtime.Object),
+ }
+}
+
+// NewContext is a convenience function that returns a new Context object with a non-nil Extensions map
+func NewContext() *Context {
+ return &Context{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewCluster is a convenience function that returns a new Cluster object with a non-nil Extensions map
+func NewCluster() *Cluster {
+ return &Cluster{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewAuthInfo is a convenience function that returns a new AuthInfo object with a non-nil Extensions map
+func NewAuthInfo() *AuthInfo {
+ return &AuthInfo{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewPreferences is a convenience function that returns a new Preferences object with a non-nil Extensions map
+func NewPreferences() *Preferences {
+ return &Preferences{Extensions: make(map[string]runtime.Object)}
+}
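The constructors above exist so that a Config can be assembled field by field without nil-map panics; a small sketch with made-up names and an illustrative server address:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
)

func main() {
	// Assemble a minimal in-memory kubeconfig; names and the server address
	// are illustrative only.
	cfg := api.NewConfig()

	cluster := api.NewCluster()
	cluster.Server = "https://example.invalid:6443"

	user := api.NewAuthInfo()
	user.Token = "dummy-token"

	ctx := api.NewContext()
	ctx.Cluster = "example"
	ctx.AuthInfo = "example-user"
	ctx.Namespace = "default"

	cfg.Clusters["example"] = cluster
	cfg.AuthInfos["example-user"] = user
	cfg.Contexts["example"] = ctx
	cfg.CurrentContext = "example"

	fmt.Println(api.IsConfigEmpty(cfg)) // false once populated
}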
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go
new file mode 100644
index 0000000..e22e5f8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go
@@ -0,0 +1,231 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "sort"
+
+ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+func init() {
+ err := api.Scheme.AddConversionFuncs(
+ func(in *Cluster, out *api.Cluster, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *api.Cluster, out *Cluster, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *Preferences, out *api.Preferences, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *api.Preferences, out *Preferences, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *Context, out *api.Context, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+ func(in *api.Context, out *Context, s conversion.Scope) error {
+ return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+ },
+
+ func(in *Config, out *api.Config, s conversion.Scope) error {
+ out.CurrentContext = in.CurrentContext
+ if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil {
+ return err
+ }
+
+ out.Clusters = make(map[string]*api.Cluster)
+ if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil {
+ return err
+ }
+ out.AuthInfos = make(map[string]*api.AuthInfo)
+ if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil {
+ return err
+ }
+ out.Contexts = make(map[string]*api.Context)
+ if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil {
+ return err
+ }
+ out.Extensions = make(map[string]runtime.Object)
+ if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+ func(in *api.Config, out *Config, s conversion.Scope) error {
+ out.CurrentContext = in.CurrentContext
+ if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil {
+ return err
+ }
+
+ out.Clusters = make([]NamedCluster, 0, 0)
+ if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil {
+ return err
+ }
+ out.AuthInfos = make([]NamedAuthInfo, 0, 0)
+ if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil {
+ return err
+ }
+ out.Contexts = make([]NamedContext, 0, 0)
+ if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil {
+ return err
+ }
+ out.Extensions = make([]NamedExtension, 0, 0)
+ if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+ func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error {
+ for _, curr := range *in {
+ newCluster := api.NewCluster()
+ if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil {
+ return err
+ }
+ (*out)[curr.Name] = newCluster
+ }
+
+ return nil
+ },
+ func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error {
+ allKeys := make([]string, 0, len(*in))
+ for key := range *in {
+ allKeys = append(allKeys, key)
+ }
+ sort.Strings(allKeys)
+
+ for _, key := range allKeys {
+ newCluster := (*in)[key]
+ oldCluster := &Cluster{}
+ if err := s.Convert(newCluster, oldCluster, 0); err != nil {
+ return err
+ }
+
+ namedCluster := NamedCluster{key, *oldCluster}
+ *out = append(*out, namedCluster)
+ }
+
+ return nil
+ },
+ func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error {
+ for _, curr := range *in {
+ newAuthInfo := api.NewAuthInfo()
+ if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil {
+ return err
+ }
+ (*out)[curr.Name] = newAuthInfo
+ }
+
+ return nil
+ },
+ func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error {
+ allKeys := make([]string, 0, len(*in))
+ for key := range *in {
+ allKeys = append(allKeys, key)
+ }
+ sort.Strings(allKeys)
+
+ for _, key := range allKeys {
+ newAuthInfo := (*in)[key]
+ oldAuthInfo := &AuthInfo{}
+ if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil {
+ return err
+ }
+
+ namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo}
+ *out = append(*out, namedAuthInfo)
+ }
+
+ return nil
+ },
+ func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error {
+ for _, curr := range *in {
+ newContext := api.NewContext()
+ if err := s.Convert(&curr.Context, newContext, 0); err != nil {
+ return err
+ }
+ (*out)[curr.Name] = newContext
+ }
+
+ return nil
+ },
+ func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error {
+ allKeys := make([]string, 0, len(*in))
+ for key := range *in {
+ allKeys = append(allKeys, key)
+ }
+ sort.Strings(allKeys)
+
+ for _, key := range allKeys {
+ newContext := (*in)[key]
+ oldContext := &Context{}
+ if err := s.Convert(newContext, oldContext, 0); err != nil {
+ return err
+ }
+
+ namedContext := NamedContext{key, *oldContext}
+ *out = append(*out, namedContext)
+ }
+
+ return nil
+ },
+ func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error {
+ for _, curr := range *in {
+ var newExtension runtime.Object
+ if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil {
+ return err
+ }
+ (*out)[curr.Name] = newExtension
+ }
+
+ return nil
+ },
+ func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error {
+ allKeys := make([]string, 0, len(*in))
+ for key := range *in {
+ allKeys = append(allKeys, key)
+ }
+ sort.Strings(allKeys)
+
+ for _, key := range allKeys {
+ newExtension := (*in)[key]
+ oldExtension := &runtime.RawExtension{}
+ if err := s.Convert(newExtension, oldExtension, 0); err != nil {
+ return err
+ }
+
+ namedExtension := NamedExtension{key, *oldExtension}
+ *out = append(*out, namedExtension)
+ }
+
+ return nil
+ },
+ )
+ if err != nil {
+ // If one of the conversion functions is malformed, detect it immediately.
+ panic(err)
+ }
+}
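The registered functions are exercised through the scheme; a hedged sketch of converting the external list-based v1.Config into the internal map-based api.Config, assuming the two-argument Scheme.Convert signature in this vendored runtime. Names and addresses are illustrative.

package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
	clientcmdv1 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"
)

func main() {
	// External form: named slices, as found in a kubeconfig file on disk.
	ext := clientcmdv1.Config{
		CurrentContext: "dev",
		Clusters: []clientcmdv1.NamedCluster{
			{Name: "dev", Cluster: clientcmdv1.Cluster{Server: "https://example.invalid:6443"}},
		},
	}

	// Internal form: maps keyed by nickname.
	internal := api.NewConfig()
	if err := api.Scheme.Convert(&ext, internal); err != nil {
		log.Fatal(err)
	}
	fmt.Println(internal.Clusters["dev"].Server)
}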
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go
new file mode 100644
index 0000000..dcdb533
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: "v1"}
+
+func init() {
+ api.Scheme.AddKnownTypes(SchemeGroupVersion,
+ &Config{},
+ )
+}
+
+func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+ obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind {
+ return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go
new file mode 100644
index 0000000..77bce80
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to build connections to remote kubernetes clusters as a given user
+type Config struct {
+ // Legacy field from pkg/api/types.go TypeMeta.
+ // TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+ Kind string `json:"kind,omitempty"`
+ // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc).
+ // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify
+ // a single value for the cluster version.
+ // This field isn't really needed anyway, so we are deprecating it without replacement.
+ // It will be ignored if it is present.
+ APIVersion string `json:"apiVersion,omitempty"`
+ // Preferences holds general information to be used for cli interactions
+ Preferences Preferences `json:"preferences"`
+ // Clusters is a list of named cluster configs
+ Clusters []NamedCluster `json:"clusters"`
+ // AuthInfos is a list of named user configs
+ AuthInfos []NamedAuthInfo `json:"users"`
+ // Contexts is a list of named context configs
+ Contexts []NamedContext `json:"contexts"`
+ // CurrentContext is the name of the context that you would like to use by default
+ CurrentContext string `json:"current-context"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+type Preferences struct {
+ Colors bool `json:"colors,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+ // Server is the address of the kubernetes cluster (https://hostname:port).
+ Server string `json:"server"`
+ // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc).
+ APIVersion string `json:"api-version,omitempty"`
+ // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+ // CertificateAuthority is the path to a cert file for the certificate authority.
+ CertificateAuthority string `json:"certificate-authority,omitempty"`
+ // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+ CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+ // ClientCertificate is the path to a client cert file for TLS.
+ ClientCertificate string `json:"client-certificate,omitempty"`
+ // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+ ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+ // ClientKey is the path to a client key file for TLS.
+ ClientKey string `json:"client-key,omitempty"`
+ // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+ ClientKeyData []byte `json:"client-key-data,omitempty"`
+ // Token is the bearer token for authentication to the kubernetes cluster.
+ Token string `json:"token,omitempty"`
+ // Impersonate is the username to impersonate. The name matches the flag.
+ Impersonate string `json:"as,omitempty"`
+ // Username is the username for basic authentication to the kubernetes cluster.
+ Username string `json:"username,omitempty"`
+ // Password is the password for basic authentication to the kubernetes cluster.
+ Password string `json:"password,omitempty"`
+ // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+ AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+ // Cluster is the name of the cluster for this context
+ Cluster string `json:"cluster"`
+ // AuthInfo is the name of the authInfo for this context
+ AuthInfo string `json:"user"`
+ // Namespace is the default namespace to use on unspecified requests
+ Namespace string `json:"namespace,omitempty"`
+ // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+ Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// NamedCluster relates nicknames to cluster information
+type NamedCluster struct {
+ // Name is the nickname for this Cluster
+ Name string `json:"name"`
+ // Cluster holds the cluster information
+ Cluster Cluster `json:"cluster"`
+}
+
+// NamedContext relates nicknames to context information
+type NamedContext struct {
+ // Name is the nickname for this Context
+ Name string `json:"name"`
+ // Context holds the context information
+ Context Context `json:"context"`
+}
+
+// NamedAuthInfo relates nicknames to auth information
+type NamedAuthInfo struct {
+ // Name is the nickname for this AuthInfo
+ Name string `json:"name"`
+ // AuthInfo holds the auth information
+ AuthInfo AuthInfo `json:"user"`
+}
+
+// NamedExtension relates nicknames to extension information
+type NamedExtension struct {
+ // Name is the nickname for this Extension
+ Name string `json:"name"`
+ // Extension holds the extension information
+ Extension runtime.RawExtension `json:"extension"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+ Name string `json:"name"`
+ Config map[string]string `json:"config"`
+}
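Because the json tags above match the kubeconfig wire format, marshalling a v1.Config directly shows the familiar layout; a sketch with made-up names (plain JSON is used here just to show the shape, while the latest package's serializer would emit YAML):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	clientcmdv1 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"
)

func main() {
	// Illustrative names; only the shape of the output matters here.
	cfg := clientcmdv1.Config{
		CurrentContext: "dev",
		Clusters: []clientcmdv1.NamedCluster{
			{Name: "dev", Cluster: clientcmdv1.Cluster{Server: "https://example.invalid:6443"}},
		},
		AuthInfos: []clientcmdv1.NamedAuthInfo{
			{Name: "dev-user", AuthInfo: clientcmdv1.AuthInfo{Token: "dummy-token"}},
		},
		Contexts: []clientcmdv1.NamedContext{
			{Name: "dev", Context: clientcmdv1.Context{Cluster: "dev", AuthInfo: "dev-user"}},
		},
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}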
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go
new file mode 100644
index 0000000..0abc425
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
+)
+
+// An AuthLoader is used to build clientauth.Info objects.
+type AuthLoader interface {
+ // LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info
+ LoadAuth(path string) (*clientauth.Info, error)
+}
+
+// default implementation of an AuthLoader
+type defaultAuthLoader struct{}
+
+// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile
+func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+ return clientauth.LoadFromFile(path)
+}
+
+type PromptingAuthLoader struct {
+ reader io.Reader
+}
+
+// LoadAuth parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+ var auth clientauth.Info
+ // Prompt for user/pass and write a file if none exists.
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ auth = *a.Prompt()
+ data, err := json.Marshal(auth)
+ if err != nil {
+ return &auth, err
+ }
+ err = ioutil.WriteFile(path, data, 0600)
+ return &auth, err
+ }
+ authPtr, err := clientauth.LoadFromFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return authPtr, nil
+}
+
+// Prompt pulls the user and password from a reader
+func (a *PromptingAuthLoader) Prompt() *clientauth.Info {
+ auth := &clientauth.Info{}
+ auth.User = promptForString("Username", a.reader)
+ auth.Password = promptForString("Password", a.reader)
+
+ return auth
+}
+
+func promptForString(field string, r io.Reader) string {
+ fmt.Printf("Please enter %s: ", field)
+ var result string
+ fmt.Fscan(r, &result)
+ return result
+}
+
+// NewPromptingAuthLoader returns an AuthLoader that parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader {
+ return &PromptingAuthLoader{reader}
+}
+
+// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file
+func NewDefaultAuthLoader() AuthLoader {
+ return &defaultAuthLoader{}
+}
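A hedged sketch of the prompting loader in use; the file path is illustrative and the stored format is whatever clientauth.Info marshals to:

package main

import (
	"fmt"
	"log"
	"os"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
	// Prompts on stdin for username/password if the file does not exist yet,
	// writes it out, and otherwise loads the previously saved clientauth.Info.
	loader := clientcmd.NewPromptingAuthLoader(os.Stdin)
	info, err := loader.LoadAuth("/tmp/.kubernetes_auth") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded auth for user:", info.User)
}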
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go
new file mode 100644
index 0000000..47b14e2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go
@@ -0,0 +1,411 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/golang/glog"
+ "github.com/imdario/mergo"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/restclient"
+ clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+var (
+ // DefaultCluster is the cluster config used when no other config is specified
+ // TODO: eventually apiserver should start on 443 and be secure by default
+ DefaultCluster = clientcmdapi.Cluster{Server: "http://localhost:8080"}
+
+ // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+ EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}
+
+ DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil, NewDefaultClientConfigLoadingRules()}
+)
+
+// ClientConfig is used to make it easy to get an api server client
+type ClientConfig interface {
+ // RawConfig returns the merged result of all overrides
+ RawConfig() (clientcmdapi.Config, error)
+ // ClientConfig returns a complete client config
+ ClientConfig() (*restclient.Config, error)
+ // Namespace returns the namespace resulting from the merged
+ // result of all overrides and a boolean indicating if it was
+ // overridden
+ Namespace() (string, bool, error)
+ // ConfigAccess returns the rules for loading/persisting the config.
+ ConfigAccess() ConfigAccess
+}
+
+type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister
+
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type DirectClientConfig struct {
+ config clientcmdapi.Config
+ contextName string
+ overrides *ConfigOverrides
+ fallbackReader io.Reader
+ configAccess ConfigAccess
+}
+
+// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
+func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig {
+ return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules()}
+}
+
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig {
+ return &DirectClientConfig{config, contextName, overrides, nil, configAccess}
+}
+
+// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
+func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig {
+ return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess}
+}
+
+func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
+ return config.config, nil
+}
+
+// ClientConfig implements ClientConfig
+func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
+ if err := config.ConfirmUsable(); err != nil {
+ return nil, err
+ }
+
+ configAuthInfo := config.getAuthInfo()
+ configClusterInfo := config.getCluster()
+
+ clientConfig := &restclient.Config{}
+ clientConfig.Host = configClusterInfo.Server
+ if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+ u.RawQuery = ""
+ u.Fragment = ""
+ clientConfig.Host = u.String()
+ }
+ if len(configAuthInfo.Impersonate) > 0 {
+ clientConfig.Impersonate = configAuthInfo.Impersonate
+ }
+
+ // only try to read the auth information if we are secure
+ if restclient.IsConfigTransportTLS(*clientConfig) {
+ var err error
+
+ // mergo is first-write-wins for map values and last-write-wins for interface values
+ // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
+ // Our mergo.Merge version is older than this change.
+ var persister restclient.AuthProviderConfigPersister
+ if config.configAccess != nil {
+ persister = PersisterForUser(config.configAccess, config.getAuthInfoName())
+ }
+ userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
+ if err != nil {
+ return nil, err
+ }
+ mergo.Merge(clientConfig, userAuthPartialConfig)
+
+ serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+ if err != nil {
+ return nil, err
+ }
+ mergo.Merge(clientConfig, serverAuthPartialConfig)
+ }
+
+ return clientConfig, nil
+}
+
+// The clientauth.Info object contains both user identification and server identification. We want different precedence
+// orders for both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
+ mergedConfig := &restclient.Config{}
+
+ // configClusterInfo holds the information that identifies the server, as provided by .kubeconfig
+ configClientConfig := &restclient.Config{}
+ configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+ configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+ configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+ mergo.Merge(mergedConfig, configClientConfig)
+
+ return mergedConfig, nil
+}
+
+// The clientauth.Info object contains both user identification and server identification. We want different precedence
+// orders for both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try loading the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
+ mergedConfig := &restclient.Config{}
+
+ // blindly overwrite existing values based on precedence
+ if len(configAuthInfo.Token) > 0 {
+ mergedConfig.BearerToken = configAuthInfo.Token
+ }
+ if len(configAuthInfo.Impersonate) > 0 {
+ mergedConfig.Impersonate = configAuthInfo.Impersonate
+ }
+ if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+ mergedConfig.CertFile = configAuthInfo.ClientCertificate
+ mergedConfig.CertData = configAuthInfo.ClientCertificateData
+ mergedConfig.KeyFile = configAuthInfo.ClientKey
+ mergedConfig.KeyData = configAuthInfo.ClientKeyData
+ }
+ if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+ mergedConfig.Username = configAuthInfo.Username
+ mergedConfig.Password = configAuthInfo.Password
+ }
+ if configAuthInfo.AuthProvider != nil {
+ mergedConfig.AuthProvider = configAuthInfo.AuthProvider
+ mergedConfig.AuthConfigPersister = persistAuthConfig
+ }
+
+ // if there still isn't enough information to authenticate the user, try prompting
+ if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
+ prompter := NewPromptingAuthLoader(fallbackReader)
+ promptedAuthInfo := prompter.Prompt()
+
+ promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
+ previouslyMergedConfig := mergedConfig
+ mergedConfig = &restclient.Config{}
+ mergo.Merge(mergedConfig, promptedConfig)
+ mergo.Merge(mergedConfig, previouslyMergedConfig)
+ }
+
+ return mergedConfig, nil
+}
+
+// makeUserIdentificationConfig returns a client.Config capable of being merged using mergo for only user identification information
+func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
+ config := &restclient.Config{}
+ config.Username = info.User
+ config.Password = info.Password
+ config.CertFile = info.CertFile
+ config.KeyFile = info.KeyFile
+ config.BearerToken = info.BearerToken
+ return config
+}
+
+// makeServerIdentificationConfig returns a client.Config capable of being merged using mergo for only server identification information
+func makeServerIdentificationConfig(info clientauth.Info) restclient.Config {
+ config := restclient.Config{}
+ config.CAFile = info.CAFile
+ if info.Insecure != nil {
+ config.Insecure = *info.Insecure
+ }
+ return config
+}
+
+func canIdentifyUser(config restclient.Config) bool {
+ return len(config.Username) > 0 ||
+ (len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+ len(config.BearerToken) > 0 ||
+ config.AuthProvider != nil
+}
+
+// Namespace implements ClientConfig
+func (config *DirectClientConfig) Namespace() (string, bool, error) {
+ if err := config.ConfirmUsable(); err != nil {
+ return "", false, err
+ }
+
+ configContext := config.getContext()
+
+ if len(configContext.Namespace) == 0 {
+ return api.NamespaceDefault, false, nil
+ }
+
+ overridden := false
+ if config.overrides != nil && config.overrides.Context.Namespace != "" {
+ overridden = true
+ }
+ return configContext.Namespace, overridden, nil
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
+ return config.configAccess
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *DirectClientConfig) ConfirmUsable() error {
+ validationErrors := make([]error, 0)
+ validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+ validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+ // when direct client config is specified, and our only error is that no server is defined, we should
+ // return a standard "no config" error
+ if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster {
+ return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+ }
+ return newErrConfigurationInvalid(validationErrors)
+}
+
+func (config *DirectClientConfig) getContextName() string {
+ if len(config.overrides.CurrentContext) != 0 {
+ return config.overrides.CurrentContext
+ }
+ if len(config.contextName) != 0 {
+ return config.contextName
+ }
+
+ return config.config.CurrentContext
+}
+
+func (config *DirectClientConfig) getAuthInfoName() string {
+ if len(config.overrides.Context.AuthInfo) != 0 {
+ return config.overrides.Context.AuthInfo
+ }
+ return config.getContext().AuthInfo
+}
+
+func (config *DirectClientConfig) getClusterName() string {
+ if len(config.overrides.Context.Cluster) != 0 {
+ return config.overrides.Context.Cluster
+ }
+ return config.getContext().Cluster
+}
+
+func (config *DirectClientConfig) getContext() clientcmdapi.Context {
+ contexts := config.config.Contexts
+ contextName := config.getContextName()
+
+ var mergedContext clientcmdapi.Context
+ if configContext, exists := contexts[contextName]; exists {
+ mergo.Merge(&mergedContext, configContext)
+ }
+ mergo.Merge(&mergedContext, config.overrides.Context)
+
+ return mergedContext
+}
+
+func (config *DirectClientConfig) getAuthInfo() clientcmdapi.AuthInfo {
+ authInfos := config.config.AuthInfos
+ authInfoName := config.getAuthInfoName()
+
+ var mergedAuthInfo clientcmdapi.AuthInfo
+ if configAuthInfo, exists := authInfos[authInfoName]; exists {
+ mergo.Merge(&mergedAuthInfo, configAuthInfo)
+ }
+ mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo)
+
+ return mergedAuthInfo
+}
+
+func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster {
+ clusterInfos := config.config.Clusters
+ clusterInfoName := config.getClusterName()
+
+ var mergedClusterInfo clientcmdapi.Cluster
+ mergo.Merge(&mergedClusterInfo, DefaultCluster)
+ mergo.Merge(&mergedClusterInfo, EnvVarCluster)
+ if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+ mergo.Merge(&mergedClusterInfo, configClusterInfo)
+ }
+ mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo)
+ // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
+ // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set"
+ caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
+ caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
+ if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 {
+ mergedClusterInfo.CertificateAuthority = ""
+ mergedClusterInfo.CertificateAuthorityData = nil
+ }
+
+ return mergedClusterInfo
+}
+
+// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
+type inClusterClientConfig struct{}
+
+func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
+ return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
+}
+
+func (inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
+ return restclient.InClusterConfig()
+}
+
+func (inClusterClientConfig) Namespace() (string, error) {
+ // This way assumes you've set the POD_NAMESPACE environment variable using the downward API.
+ // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up
+ if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
+ return ns, nil
+ }
+
+ // Fall back to the namespace associated with the service account token, if available
+ if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
+ if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
+ return ns, nil
+ }
+ }
+
+ return "default", nil
+}
+
+func (inClusterClientConfig) ConfigAccess() ConfigAccess {
+ return NewDefaultClientConfigLoadingRules()
+}
+
+// Possible returns true if loading the in-cluster configuration is possible.
+func (inClusterClientConfig) Possible() bool {
+ fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
+ return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+ os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
+ err == nil && !fi.IsDir()
+}
+
+// BuildConfigFromFlags is a helper function that builds configs from a master
+// url or a kubeconfig filepath. These are passed in as command line flags for cluster
+// components. Warnings should reflect this usage. If neither masterUrl nor kubeconfigPath
+// is passed in, we fall back to inClusterConfig. If inClusterConfig fails, we fall back
+// to the default config.
+func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
+ if kubeconfigPath == "" && masterUrl == "" {
+ glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
+ kubeconfig, err := restclient.InClusterConfig()
+ if err == nil {
+ return kubeconfig, nil
+ }
+ glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
+ }
+ return NewNonInteractiveDeferredLoadingClientConfig(
+ &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
+ &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
+}
+
+// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master
+// url and a kubeconfigGetter.
+func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) {
+ // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here.
+ cc := NewNonInteractiveDeferredLoadingClientConfig(
+ &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter},
+ &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}})
+ return cc.ClientConfig()
+}
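A hedged sketch of BuildConfigFromFlags wired into a cluster component's flag parsing; the flag names here are illustrative, not mandated by the function:

package main

import (
	"flag"
	"log"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
	// Typical cluster-component wiring: both flags are optional, and with
	// neither set BuildConfigFromFlags falls back to the in-cluster config.
	master := flag.String("master", "", "URL of the Kubernetes API server (optional)")
	kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file (optional)")
	flag.Parse()

	cfg, err := clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
	if err != nil {
		log.Fatalf("could not build client config: %v", err)
	}
	log.Printf("API server: %s", cfg.Host)
}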
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go
new file mode 100644
index 0000000..9df69a7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go
@@ -0,0 +1,472 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "errors"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/client/restclient"
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files
+type ConfigAccess interface {
+ // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config
+ GetLoadingPrecedence() []string
+ // GetStartingConfig returns the config that subcommands should be operating against. It may or may not be merged depending on loading rules
+ GetStartingConfig() (*clientcmdapi.Config, error)
+ // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one.
+ GetDefaultFilename() string
+ // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more
+ IsExplicitFile() bool
+ // GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more
+ GetExplicitFile() string
+}
+
+type PathOptions struct {
+ // GlobalFile is the full path to the file to load as the global (final) option
+ GlobalFile string
+ // EnvVar is the env var name that points to the list of kubeconfig files to load
+ EnvVar string
+ // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file
+ ExplicitFileFlag string
+
+ // GlobalFileSubpath is an optional value used for displaying help
+ GlobalFileSubpath string
+
+ LoadingRules *ClientConfigLoadingRules
+}
+
+func (o *PathOptions) GetEnvVarFiles() []string {
+ if len(o.EnvVar) == 0 {
+ return []string{}
+ }
+
+ envVarValue := os.Getenv(o.EnvVar)
+ if len(envVarValue) == 0 {
+ return []string{}
+ }
+
+ return filepath.SplitList(envVarValue)
+}
+
+func (o *PathOptions) GetLoadingPrecedence() []string {
+ if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+ return envVarFiles
+ }
+
+ return []string{o.GlobalFile}
+}
+
+func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) {
+ // don't mutate the original
+ loadingRules := *o.LoadingRules
+ loadingRules.Precedence = o.GetLoadingPrecedence()
+
+ clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{})
+ rawConfig, err := clientConfig.RawConfig()
+ if os.IsNotExist(err) {
+ return clientcmdapi.NewConfig(), nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &rawConfig, nil
+}
+
+func (o *PathOptions) GetDefaultFilename() string {
+ if o.IsExplicitFile() {
+ return o.GetExplicitFile()
+ }
+
+ if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+ if len(envVarFiles) == 1 {
+ return envVarFiles[0]
+ }
+
+ // if any of the envvar files already exists, return it
+ for _, envVarFile := range envVarFiles {
+ if _, err := os.Stat(envVarFile); err == nil {
+ return envVarFile
+ }
+ }
+
+ // otherwise, return the last one in the list
+ return envVarFiles[len(envVarFiles)-1]
+ }
+
+ return o.GlobalFile
+}
+
+func (o *PathOptions) IsExplicitFile() bool {
+ if len(o.LoadingRules.ExplicitPath) > 0 {
+ return true
+ }
+
+ return false
+}
+
+func (o *PathOptions) GetExplicitFile() string {
+ return o.LoadingRules.ExplicitPath
+}
+
+func NewDefaultPathOptions() *PathOptions {
+ ret := &PathOptions{
+ GlobalFile: RecommendedHomeFile,
+ EnvVar: RecommendedConfigPathEnvVar,
+ ExplicitFileFlag: RecommendedConfigPathFlag,
+
+ GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
+
+ LoadingRules: NewDefaultClientConfigLoadingRules(),
+ }
+ ret.LoadingRules.DoNotResolvePaths = true
+
+ return ret
+}
+
+// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or
+// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow.
+// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values
+// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference,
+// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any
+// modified element.
+func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
+ possibleSources := configAccess.GetLoadingPrecedence()
+ // sort the possible kubeconfig files so we always "lock" in the same order
+ // to avoid deadlock (note: this can fail w/ symlinks, but... come on).
+ sort.Strings(possibleSources)
+ for _, filename := range possibleSources {
+ if err := lockFile(filename); err != nil {
+ return err
+ }
+ defer unlockFile(filename)
+ }
+
+ startingConfig, err := configAccess.GetStartingConfig()
+ if err != nil {
+ return err
+ }
+
+ // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
+ // Special case the test for current context and preferences since those always write to the default file.
+ if reflect.DeepEqual(*startingConfig, newConfig) {
+ // nothing to do
+ return nil
+ }
+
+ if startingConfig.CurrentContext != newConfig.CurrentContext {
+ if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
+ return err
+ }
+ }
+
+ if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
+ if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
+ return err
+ }
+ }
+
+ // Search every cluster, authInfo, and context. First from new to old for differences, then from old to new for deletions
+ for key, cluster := range newConfig.Clusters {
+ startingCluster, exists := startingConfig.Clusters[key]
+ if !reflect.DeepEqual(cluster, startingCluster) || !exists {
+ destinationFile := cluster.LocationOfOrigin
+ if len(destinationFile) == 0 {
+ destinationFile = configAccess.GetDefaultFilename()
+ }
+
+ configToWrite, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ t := *cluster
+
+ configToWrite.Clusters[key] = &t
+ configToWrite.Clusters[key].LocationOfOrigin = destinationFile
+ if relativizePaths {
+ if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil {
+ return err
+ }
+ }
+
+ if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+ return err
+ }
+ }
+ }
+
+ for key, context := range newConfig.Contexts {
+ startingContext, exists := startingConfig.Contexts[key]
+ if !reflect.DeepEqual(context, startingContext) || !exists {
+ destinationFile := context.LocationOfOrigin
+ if len(destinationFile) == 0 {
+ destinationFile = configAccess.GetDefaultFilename()
+ }
+
+ configToWrite, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ configToWrite.Contexts[key] = context
+
+ if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+ return err
+ }
+ }
+ }
+
+ for key, authInfo := range newConfig.AuthInfos {
+ startingAuthInfo, exists := startingConfig.AuthInfos[key]
+ if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists {
+ destinationFile := authInfo.LocationOfOrigin
+ if len(destinationFile) == 0 {
+ destinationFile = configAccess.GetDefaultFilename()
+ }
+
+ configToWrite, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ t := *authInfo
+ configToWrite.AuthInfos[key] = &t
+ configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile
+ if relativizePaths {
+ if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil {
+ return err
+ }
+ }
+
+ if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+ return err
+ }
+ }
+ }
+
+ for key, cluster := range startingConfig.Clusters {
+ if _, exists := newConfig.Clusters[key]; !exists {
+ destinationFile := cluster.LocationOfOrigin
+ if len(destinationFile) == 0 {
+ destinationFile = configAccess.GetDefaultFilename()
+ }
+
+ configToWrite, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ delete(configToWrite.Clusters, key)
+
+ if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+ return err
+ }
+ }
+ }
+
+ for key, context := range startingConfig.Contexts {
+ if _, exists := newConfig.Contexts[key]; !exists {
+ destinationFile := context.LocationOfOrigin
+ if len(destinationFile) == 0 {
+ destinationFile = configAccess.GetDefaultFilename()
+ }
+
+ configToWrite, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ delete(configToWrite.Contexts, key)
+
+ if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+ return err
+ }
+ }
+ }
+
+ for key, authInfo := range startingConfig.AuthInfos {
+ if _, exists := newConfig.AuthInfos[key]; !exists {
+ destinationFile := authInfo.LocationOfOrigin
+ if len(destinationFile) == 0 {
+ destinationFile = configAccess.GetDefaultFilename()
+ }
+
+ configToWrite, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ delete(configToWrite.AuthInfos, key)
+
+ if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister {
+ return &persister{configAccess, user}
+}
+
+type persister struct {
+ configAccess ConfigAccess
+ user string
+}
+
+func (p *persister) Persist(config map[string]string) error {
+ newConfig, err := p.configAccess.GetStartingConfig()
+ if err != nil {
+ return err
+ }
+ authInfo, ok := newConfig.AuthInfos[p.user]
+ if ok && authInfo.AuthProvider != nil {
+ authInfo.AuthProvider.Config = config
+ ModifyConfig(p.configAccess, *newConfig, false)
+ }
+ return nil
+}
+
+// writeCurrentContext takes four possible paths.
+// If newCurrentContext is the same as the startingConfig's current context, we exit.
+// If the config access points at an explicit file, the value is written into that file.
+// If newCurrentContext has a value, that value is written into the default destination file.
+// If newCurrentContext is empty, we find the config file that is setting the CurrentContext and clear the value from that file.
+func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
+ if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+ return err
+ } else if startingConfig.CurrentContext == newCurrentContext {
+ return nil
+ }
+
+ if configAccess.IsExplicitFile() {
+ file := configAccess.GetExplicitFile()
+ currConfig, err := getConfigFromFile(file)
+ if err != nil {
+ return err
+ }
+ currConfig.CurrentContext = newCurrentContext
+ if err := WriteToFile(*currConfig, file); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ if len(newCurrentContext) > 0 {
+ destinationFile := configAccess.GetDefaultFilename()
+ config, err := getConfigFromFile(destinationFile)
+ if err != nil {
+ return err
+ }
+ config.CurrentContext = newCurrentContext
+
+ if err := WriteToFile(*config, destinationFile); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it
+ for _, file := range configAccess.GetLoadingPrecedence() {
+ if _, err := os.Stat(file); err == nil {
+ currConfig, err := getConfigFromFile(file)
+ if err != nil {
+ return err
+ }
+
+ if len(currConfig.CurrentContext) > 0 {
+ currConfig.CurrentContext = newCurrentContext
+ if err := WriteToFile(*currConfig, file); err != nil {
+ return err
+ }
+
+ return nil
+ }
+ }
+ }
+
+ return errors.New("no config found to write context")
+}
+
+func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error {
+ if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+ return err
+ } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) {
+ return nil
+ }
+
+ if configAccess.IsExplicitFile() {
+ file := configAccess.GetExplicitFile()
+ currConfig, err := getConfigFromFile(file)
+ if err != nil {
+ return err
+ }
+ currConfig.Preferences = newPrefs
+ if err := WriteToFile(*currConfig, file); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ for _, file := range configAccess.GetLoadingPrecedence() {
+ currConfig, err := getConfigFromFile(file)
+ if err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(currConfig.Preferences, newPrefs) {
+ currConfig.Preferences = newPrefs
+ if err := WriteToFile(*currConfig, file); err != nil {
+ return err
+ }
+
+ return nil
+ }
+ }
+
+ return errors.New("no config found to write preferences")
+}
+
+// getConfigFromFile tries to read a kubeconfig file and, if it can't, returns an error. One exception: missing files result in empty configs, not an error.
+func getConfigFromFile(filename string) (*clientcmdapi.Config, error) {
+ config, err := LoadFromFile(filename)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if config == nil {
+ config = clientcmdapi.NewConfig()
+ }
+ return config, nil
+}
+
+// GetConfigFromFileOrDie tries to read a kubeconfig file and, if it can't, it exits. One exception: missing files result in empty configs, not an exit.
+func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config {
+ config, err := getConfigFromFile(filename)
+ if err != nil {
+ glog.FatalDepth(1, err)
+ }
+
+ return config
+}
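
A minimal sketch of how a caller might use PathOptions together with ModifyConfig above to persist a current-context change. It assumes the vendored import path in this tree; the context name is hypothetical.

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
    )

    func main() {
    	pathOptions := clientcmd.NewDefaultPathOptions()

    	// Merged view of the KUBECONFIG file list (if set) or the recommended home file.
    	config, err := pathOptions.GetStartingConfig()
    	if err != nil {
    		panic(err)
    	}

    	// Change only the current context; ModifyConfig locks each candidate file
    	// and writes just that stanza back to the appropriate destination.
    	config.CurrentContext = "my-other-context" // hypothetical context name
    	if err := clientcmd.ModifyConfig(pathOptions, *config, false); err != nil {
    		panic(err)
    	}
    	fmt.Println("current context written to", pathOptions.GetDefaultFilename())
    }
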
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go
new file mode 100644
index 0000000..30ef6f3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package clientcmd provides one stop shopping for building a working client from a fixed config,
+from a .kubeconfig file, from command line flags, or from any merged combination.
+
+Sample usage from merged .kubeconfig files (local directory, home directory)
+
+ loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+ // if you want to change the loading rules (which files in which order), you can do so here
+
+ configOverrides := &clientcmd.ConfigOverrides{}
+ // if you want to change override values or bind them to flags, there are methods to help you
+
+ kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+ config, err := kubeConfig.ClientConfig()
+ if err != nil {
+ // Do something
+ }
+ client, err := unversioned.New(config)
+ // ...
+*/
+package clientcmd
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go
new file mode 100644
index 0000000..1009406
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go
@@ -0,0 +1,585 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ goruntime "runtime"
+ "strings"
+
+ "github.com/golang/glog"
+ "github.com/imdario/mergo"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+ clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest"
+ "k8s.io/kubernetes/pkg/runtime"
+ utilerrors "k8s.io/kubernetes/pkg/util/errors"
+ "k8s.io/kubernetes/pkg/util/homedir"
+)
+
+const (
+ RecommendedConfigPathFlag = "kubeconfig"
+ RecommendedConfigPathEnvVar = "KUBECONFIG"
+ RecommendedHomeDir = ".kube"
+ RecommendedFileName = "config"
+ RecommendedSchemaName = "schema"
+)
+
+var RecommendedHomeFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedFileName)
+var RecommendedSchemaFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedSchemaName)
+
+// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
+// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
+// sure existing config files are migrated to their new locations properly.
+func currentMigrationRules() map[string]string {
+ oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig")
+ oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName)
+
+ migrationRules := map[string]string{}
+ migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile
+ if goruntime.GOOS == "windows" {
+ migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile
+ }
+ return migrationRules
+}
+
+type ClientConfigLoader interface {
+ ConfigAccess
+ Load() (*clientcmdapi.Config, error)
+}
+
+type KubeconfigGetter func() (*clientcmdapi.Config, error)
+
+type ClientConfigGetter struct {
+ kubeconfigGetter KubeconfigGetter
+}
+
+// ClientConfigGetter implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigGetter{}
+
+func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) {
+ return g.kubeconfigGetter()
+}
+
+func (g *ClientConfigGetter) GetLoadingPrecedence() []string {
+ return nil
+}
+func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) {
+ return nil, nil
+}
+func (g *ClientConfigGetter) GetDefaultFilename() string {
+ return ""
+}
+func (g *ClientConfigGetter) IsExplicitFile() bool {
+ return false
+}
+func (g *ClientConfigGetter) GetExplicitFile() string {
+ return ""
+}
+
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config.
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files), otherwise the HomeDirectoryPath.
+// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if that file is not present.
+type ClientConfigLoadingRules struct {
+ ExplicitPath string
+ Precedence []string
+
+ // MigrationRules is a map of destination files to source files. If a destination file is not present, then the source file is checked.
+ // If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+ MigrationRules map[string]string
+
+ // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so
+ // that a default object that doesn't set this will usually get the behavior it wants.
+ DoNotResolvePaths bool
+}
+
+// ClientConfigLoadingRules implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigLoadingRules{}
+
+// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to
+// use this constructor.
+func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {
+ chain := []string{}
+
+ envVarFiles := os.Getenv(RecommendedConfigPathEnvVar)
+ if len(envVarFiles) != 0 {
+ chain = append(chain, filepath.SplitList(envVarFiles)...)
+
+ } else {
+ chain = append(chain, RecommendedHomeFile)
+ }
+
+ return &ClientConfigLoadingRules{
+ Precedence: chain,
+ MigrationRules: currentMigrationRules(),
+ }
+}
+
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules:
+// if ExplicitPath is set, return the unmerged explicit file;
+// otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins and the map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd-looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
+ if err := rules.Migrate(); err != nil {
+ return nil, err
+ }
+
+ errlist := []error{}
+
+ kubeConfigFiles := []string{}
+
+ // Make sure a file we were explicitly told to use exists
+ if len(rules.ExplicitPath) > 0 {
+ if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) {
+ return nil, err
+ }
+ kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath)
+
+ } else {
+ kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+ }
+
+ kubeconfigs := []*clientcmdapi.Config{}
+ // read and cache the config files so that we only look at them once
+ for _, filename := range kubeConfigFiles {
+ if len(filename) == 0 {
+ // no work to do
+ continue
+ }
+
+ config, err := LoadFromFile(filename)
+ if os.IsNotExist(err) {
+ // skip missing files
+ continue
+ }
+ if err != nil {
+ errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err))
+ continue
+ }
+
+ kubeconfigs = append(kubeconfigs, config)
+ }
+
+ // first merge all of our maps
+ mapConfig := clientcmdapi.NewConfig()
+ for _, kubeconfig := range kubeconfigs {
+ mergo.Merge(mapConfig, kubeconfig)
+ }
+
+ // merge all of the struct values in the reverse order so that priority is given correctly
+ // errors are not added to the list the second time
+ nonMapConfig := clientcmdapi.NewConfig()
+ for i := len(kubeconfigs) - 1; i >= 0; i-- {
+ kubeconfig := kubeconfigs[i]
+ mergo.Merge(nonMapConfig, kubeconfig)
+ }
+
+ // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+ // get the values we expect.
+ config := clientcmdapi.NewConfig()
+ mergo.Merge(config, mapConfig)
+ mergo.Merge(config, nonMapConfig)
+
+ if rules.ResolvePaths() {
+ if err := ResolveLocalPaths(config); err != nil {
+ errlist = append(errlist, err)
+ }
+ }
+
+ return config, utilerrors.NewAggregate(errlist)
+}
+
+// Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked.
+// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+func (rules *ClientConfigLoadingRules) Migrate() error {
+ if rules.MigrationRules == nil {
+ return nil
+ }
+
+ for destination, source := range rules.MigrationRules {
+ if _, err := os.Stat(destination); err == nil {
+ // if the destination already exists, do nothing
+ continue
+ } else if os.IsPermission(err) {
+ // if we can't access the file, skip it
+ continue
+ } else if !os.IsNotExist(err) {
+ // if we had an error other than non-existence, fail
+ return err
+ }
+
+ if sourceInfo, err := os.Stat(source); err != nil {
+ if os.IsNotExist(err) || os.IsPermission(err) {
+ // if the source file doesn't exist or we can't access it, there's no work to do.
+ continue
+ }
+
+ // if we had an error other than non-existence, fail
+ return err
+ } else if sourceInfo.IsDir() {
+ return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination)
+ }
+
+ in, err := os.Open(source)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+ out, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ if _, err = io.Copy(out, in); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetLoadingPrecedence implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string {
+ return rules.Precedence
+}
+
+// GetStartingConfig implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) {
+ clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{})
+ rawConfig, err := clientConfig.RawConfig()
+ if os.IsNotExist(err) {
+ return clientcmdapi.NewConfig(), nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &rawConfig, nil
+}
+
+// GetDefaultFilename implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetDefaultFilename() string {
+ // Explicit file if we have one.
+ if rules.IsExplicitFile() {
+ return rules.GetExplicitFile()
+ }
+ // Otherwise, first existing file from precedence.
+ for _, filename := range rules.GetLoadingPrecedence() {
+ if _, err := os.Stat(filename); err == nil {
+ return filename
+ }
+ }
+ // If none exists, use the first from precedence.
+ if len(rules.Precedence) > 0 {
+ return rules.Precedence[0]
+ }
+ return ""
+}
+
+// IsExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) IsExplicitFile() bool {
+ return len(rules.ExplicitPath) > 0
+}
+
+// GetExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetExplicitFile() string {
+ return rules.ExplicitPath
+}
+
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
+ kubeconfigBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ config, err := Load(kubeconfigBytes)
+ if err != nil {
+ return nil, err
+ }
+ glog.V(6).Infoln("Config loaded from file", filename)
+
+ // set LocationOfOrigin on every Cluster, User, and Context
+ for key, obj := range config.AuthInfos {
+ obj.LocationOfOrigin = filename
+ config.AuthInfos[key] = obj
+ }
+ for key, obj := range config.Clusters {
+ obj.LocationOfOrigin = filename
+ config.Clusters[key] = obj
+ }
+ for key, obj := range config.Contexts {
+ obj.LocationOfOrigin = filename
+ config.Contexts[key] = obj
+ }
+
+ if config.AuthInfos == nil {
+ config.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
+ }
+ if config.Clusters == nil {
+ config.Clusters = map[string]*clientcmdapi.Cluster{}
+ }
+ if config.Contexts == nil {
+ config.Contexts = map[string]*clientcmdapi.Context{}
+ }
+
+ return config, nil
+}
+
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func Load(data []byte) (*clientcmdapi.Config, error) {
+ config := clientcmdapi.NewConfig()
+ // if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+ if len(data) == 0 {
+ return config, nil
+ }
+ decoded, _, err := clientcmdlatest.Codec.Decode(data, &unversioned.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config)
+ if err != nil {
+ return nil, err
+ }
+ return decoded.(*clientcmdapi.Config), nil
+}
+
+// WriteToFile serializes the config to yaml and writes it out to a file. If the file is not present, it is created with mode 0600;
+// if it is present, its contents are overwritten.
+func WriteToFile(config clientcmdapi.Config, filename string) error {
+ content, err := Write(config)
+ if err != nil {
+ return err
+ }
+ dir := filepath.Dir(filename)
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ }
+
+ if err := ioutil.WriteFile(filename, content, 0600); err != nil {
+ return err
+ }
+ return nil
+}
+
+func lockFile(filename string) error {
+ // TODO: find a way to do this with actual file locks. Will
+ // probably need a separate solution for Windows and Linux.
+
+ // Make sure the dir exists before we try to create a lock file.
+ dir := filepath.Dir(filename)
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ }
+ f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ return nil
+}
+
+func unlockFile(filename string) error {
+ return os.Remove(lockName(filename))
+}
+
+func lockName(filename string) string {
+ return filename + ".lock"
+}
+
+// Write serializes the config to yaml.
+// Encapsulates serialization without assuming the destination is a file.
+func Write(config clientcmdapi.Config) ([]byte, error) {
+ return runtime.Encode(clientcmdlatest.Codec, &config)
+}
+
+func (rules ClientConfigLoadingRules) ResolvePaths() bool {
+ return !rules.DoNotResolvePaths
+}
+
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin.
+// This cannot be done directly inside LoadFromFile because doing so there would make it impossible to load a file without
+// modifying its contents.
+func ResolveLocalPaths(config *clientcmdapi.Config) error {
+ for _, cluster := range config.Clusters {
+ if len(cluster.LocationOfOrigin) == 0 {
+ continue
+ }
+ base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+ if err != nil {
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+ }
+
+ if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
+ return err
+ }
+ }
+ for _, authInfo := range config.AuthInfos {
+ if len(authInfo.LocationOfOrigin) == 0 {
+ continue
+ }
+ base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+ if err != nil {
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+ }
+
+ if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin
+func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error {
+ if len(cluster.LocationOfOrigin) == 0 {
+ return fmt.Errorf("no location of origin for %s", cluster.Server)
+ }
+ base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+ if err != nil {
+ return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+ }
+
+ if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
+ return err
+ }
+ if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin
+func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error {
+ if len(authInfo.LocationOfOrigin) == 0 {
+ return fmt.Errorf("no location of origin for %v", authInfo)
+ }
+ base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+ if err != nil {
+ return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+ }
+
+ if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+ return err
+ }
+ if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error {
+ return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base)
+}
+
+func ResolveConfigPaths(config *clientcmdapi.Config, base string) error {
+ return ResolvePaths(GetConfigFileReferences(config), base)
+}
+
+func GetConfigFileReferences(config *clientcmdapi.Config) []*string {
+ refs := []*string{}
+
+ for _, cluster := range config.Clusters {
+ refs = append(refs, GetClusterFileReferences(cluster)...)
+ }
+ for _, authInfo := range config.AuthInfos {
+ refs = append(refs, GetAuthInfoFileReferences(authInfo)...)
+ }
+
+ return refs
+}
+
+func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string {
+ return []*string{&cluster.CertificateAuthority}
+}
+
+func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string {
+ return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
+}
+
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func ResolvePaths(refs []*string, base string) error {
+ for _, ref := range refs {
+ // Don't resolve empty paths
+ if len(*ref) > 0 {
+ // Don't resolve absolute paths
+ if !filepath.IsAbs(*ref) {
+ *ref = filepath.Join(base, *ref)
+ }
+ }
+ }
+ return nil
+}
+
+// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory, as long as they do not require backsteps.
+// Any path requiring a backstep is left as-is, as long as it is absolute. Any non-absolute path that can't be relativized produces an error.
+func RelativizePathWithNoBacksteps(refs []*string, base string) error {
+ for _, ref := range refs {
+ // Don't relativize empty paths
+ if len(*ref) > 0 {
+ rel, err := MakeRelative(*ref, base)
+ if err != nil {
+ return err
+ }
+
+ // if we have a backstep, don't mess with the path
+ if strings.HasPrefix(rel, "../") {
+ if filepath.IsAbs(*ref) {
+ continue
+ }
+
+ return fmt.Errorf("%v requires backsteps and is not absolute", *ref)
+ }
+
+ *ref = rel
+ }
+ }
+ return nil
+}
+
+func MakeRelative(path, base string) (string, error) {
+ if len(path) > 0 {
+ rel, err := filepath.Rel(base, path)
+ if err != nil {
+ return path, err
+ }
+ return rel, nil
+ }
+ return path, nil
+}
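
A rough sketch of exercising these loading rules directly, assuming the vendored import path in this tree; the KUBECONFIG file names are hypothetical and the list separator shown is the Unix one.

    package main

    import (
    	"fmt"
    	"os"

    	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
    )

    func main() {
    	// Two hypothetical kubeconfig files; map entries defined in config-a win.
    	os.Setenv("KUBECONFIG", "/tmp/config-a:/tmp/config-b")

    	rules := clientcmd.NewDefaultClientConfigLoadingRules()
    	config, err := rules.Load()
    	if err != nil {
    		panic(err)
    	}

    	// Map entries (clusters, users, contexts) come from the first file that
    	// defines them; struct fields such as CurrentContext follow the same precedence.
    	for name := range config.Clusters {
    		fmt.Println("cluster:", name)
    	}
    	fmt.Println("current context:", config.CurrentContext)
    }
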
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go
new file mode 100644
index 0000000..0180469
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "io"
+ "reflect"
+ "sync"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/client/restclient"
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader.
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens and you want your calling code to be ignorant of how the values are being mutated, to avoid
+// passing extraneous information down a call stack.
+type DeferredLoadingClientConfig struct {
+ loader ClientConfigLoader
+ overrides *ConfigOverrides
+ fallbackReader io.Reader
+
+ clientConfig ClientConfig
+ loadingLock sync.Mutex
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loader and overrides
+func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
+ return &DeferredLoadingClientConfig{loader: loader, overrides: overrides}
+}
+
+// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loader, overrides, and fallback auth reader
+func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
+ return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, fallbackReader: fallbackReader}
+}
+
+func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
+ if config.clientConfig == nil {
+ config.loadingLock.Lock()
+ defer config.loadingLock.Unlock()
+
+ if config.clientConfig == nil {
+ mergedConfig, err := config.loader.Load()
+ if err != nil {
+ return nil, err
+ }
+
+ var mergedClientConfig ClientConfig
+ if config.fallbackReader != nil {
+ mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader)
+ } else {
+ mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader)
+ }
+
+ config.clientConfig = mergedClientConfig
+ }
+ }
+
+ return config.clientConfig, nil
+}
+
+func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) {
+ mergedConfig, err := config.createClientConfig()
+ if err != nil {
+ return clientcmdapi.Config{}, err
+ }
+
+ return mergedConfig.RawConfig()
+}
+
+// ClientConfig implements ClientConfig
+func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) {
+ mergedClientConfig, err := config.createClientConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ mergedConfig, err := mergedClientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ // Are we running in a cluster and were no other configs found? If so, use the in-cluster-config.
+ icc := inClusterClientConfig{}
+ defaultConfig, err := DefaultClientConfig.ClientConfig()
+ if icc.Possible() && err == nil && reflect.DeepEqual(mergedConfig, defaultConfig) {
+ glog.V(2).Info("No kubeconfig could be created, falling back to service account.")
+ return icc.ClientConfig()
+ }
+ return mergedConfig, nil
+}
+
+// Namespace implements ClientConfig
+func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
+ mergedKubeConfig, err := config.createClientConfig()
+ if err != nil {
+ return "", false, err
+ }
+
+ return mergedKubeConfig.Namespace()
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess {
+ return config.loader
+}
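
A small sketch of the deferred loader in use, assuming the vendored import paths in this tree; the context override value is hypothetical.

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
    )

    func main() {
    	rules := clientcmd.NewDefaultClientConfigLoadingRules()
    	overrides := &clientcmd.ConfigOverrides{CurrentContext: "staging"} // hypothetical context

    	// Nothing is read from disk until ClientConfig/RawConfig/Namespace is called,
    	// so flags bound to rules or overrides can still change before first use.
    	deferred := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)

    	restConfig, err := deferred.ClientConfig()
    	if err != nil {
    		panic(err)
    	}
    	namespace, _, err := deferred.Namespace()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("server:", restConfig.Host, "namespace:", namespace)
    }
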
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go
new file mode 100644
index 0000000..40a35e6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "strconv"
+
+ "github.com/spf13/pflag"
+
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+)
+
+// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't
+// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one".
+type ConfigOverrides struct {
+ AuthInfo clientcmdapi.AuthInfo
+ ClusterInfo clientcmdapi.Cluster
+ Context clientcmdapi.Context
+ CurrentContext string
+}
+
+// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly
+// corresponds to ConfigOverrides
+type ConfigOverrideFlags struct {
+ AuthOverrideFlags AuthOverrideFlags
+ ClusterOverrideFlags ClusterOverrideFlags
+ ContextOverrideFlags ContextOverrideFlags
+ CurrentContext FlagInfo
+}
+
+// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects
+type AuthOverrideFlags struct {
+ ClientCertificate FlagInfo
+ ClientKey FlagInfo
+ Token FlagInfo
+ Impersonate FlagInfo
+ Username FlagInfo
+ Password FlagInfo
+}
+
+// ContextOverrideFlags holds the flag names to be used for binding command line flags for Context objects
+type ContextOverrideFlags struct {
+ ClusterName FlagInfo
+ AuthInfoName FlagInfo
+ Namespace FlagInfo
+}
+
+// ClusterOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects
+type ClusterOverrideFlags struct {
+ APIServer FlagInfo
+ APIVersion FlagInfo
+ CertificateAuthority FlagInfo
+ InsecureSkipTLSVerify FlagInfo
+}
+
+// FlagInfo contains information about how to register a flag. This struct is useful if you want to provide a way for an extender to
+// get back a set of recommended flag names, descriptions, and defaults, while still allowing for customization by the extender. This makes for
+// coherent extension, without full prescription.
+type FlagInfo struct {
+ // LongName is the long string for a flag. If this is empty, then the flag will not be bound
+ LongName string
+ // ShortName is the single character for a flag. If this is empty, then there will be no short flag
+ ShortName string
+ // Default is the default value for the flag
+ Default string
+ // Description is the description for the flag
+ Description string
+}
+
+// BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered
+func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) {
+ // you can't register a flag without a long name
+ if len(f.LongName) > 0 {
+ flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description)
+ }
+}
+
+// BindBoolFlag binds the flag based on the provided info. If LongName == "", nothing is registered
+func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) {
+ // you can't register a flag without a long name
+ if len(f.LongName) > 0 {
+ // try to parse Default as a bool. If it fails, assume false
+ boolVal, err := strconv.ParseBool(f.Default)
+ if err != nil {
+ boolVal = false
+ }
+
+ flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description)
+ }
+}
+
+const (
+ FlagClusterName = "cluster"
+ FlagAuthInfoName = "user"
+ FlagContext = "context"
+ FlagNamespace = "namespace"
+ FlagAPIServer = "server"
+ FlagAPIVersion = "api-version"
+ FlagInsecure = "insecure-skip-tls-verify"
+ FlagCertFile = "client-certificate"
+ FlagKeyFile = "client-key"
+ FlagCAFile = "certificate-authority"
+ FlagEmbedCerts = "embed-certs"
+ FlagBearerToken = "token"
+ FlagImpersonate = "as"
+ FlagUsername = "username"
+ FlagPassword = "password"
+)
+
+// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags {
+ return AuthOverrideFlags{
+ ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"},
+ ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"},
+ Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"},
+ Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"},
+ Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"},
+ Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"},
+ }
+}
+
+// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
+ return ClusterOverrideFlags{
+ APIServer: FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"},
+ APIVersion: FlagInfo{prefix + FlagAPIVersion, "", "", "DEPRECATED: The API version to use when talking to the server"},
+ CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert. file for the certificate authority"},
+ InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
+ }
+}
+
+// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags {
+ return ConfigOverrideFlags{
+ AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix),
+ ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix),
+ ContextOverrideFlags: RecommendedContextOverrideFlags(prefix),
+ CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"},
+ }
+}
+
+// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
+ return ContextOverrideFlags{
+ ClusterName: FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"},
+ AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"},
+ Namespace: FlagInfo{prefix + FlagNamespace, "", "", "If present, the namespace scope for this CLI request"},
+ }
+}
+
+// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables
+func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
+ flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate)
+ flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey)
+ flagNames.Token.BindStringFlag(flags, &authInfo.Token)
+ flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate)
+ flagNames.Username.BindStringFlag(flags, &authInfo.Username)
+ flagNames.Password.BindStringFlag(flags, &authInfo.Password)
+}
+
+// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
+func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
+ flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
+ // TODO: remove --api-version flag in 1.3.
+ flagNames.APIVersion.BindStringFlag(flags, &clusterInfo.APIVersion)
+ flags.MarkDeprecated(FlagAPIVersion, "flag is no longer respected and will be deleted in the next release")
+ flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
+ flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
+}
+
+// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables
+func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
+ BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
+ BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
+ BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
+ flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext)
+}
+
+// BindContextFlags is a convenience method to bind the specified flags to their associated variables
+func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
+ flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster)
+ flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo)
+ flagNames.Namespace.BindStringFlag(flags, &contextInfo.Namespace)
+}
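
As a sketch, these helpers would typically be wired to a pflag.FlagSet before the deferred loader is built; the flag set name, prefix, and command-line values below are hypothetical.

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"

    	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
    )

    func main() {
    	flags := pflag.NewFlagSet("example", pflag.ExitOnError)

    	// Register --server, --token, --context, --namespace, etc. (empty prefix here).
    	overrides := &clientcmd.ConfigOverrides{}
    	clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))

    	// Hypothetical command line.
    	_ = flags.Parse([]string{"--context=staging", "--namespace=demo"})

    	fmt.Println("context override:", overrides.CurrentContext)
    	fmt.Println("namespace override:", overrides.Context.Namespace)
    }
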
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go
new file mode 100644
index 0000000..63f8ade
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go
@@ -0,0 +1,270 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+
+ clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+ utilerrors "k8s.io/kubernetes/pkg/util/errors"
+ "k8s.io/kubernetes/pkg/util/validation"
+)
+
+var (
+ ErrNoContext = errors.New("no context chosen")
+ ErrEmptyConfig = errors.New("no configuration has been provided")
+ // message is for consistency with old behavior
+ ErrEmptyCluster = errors.New("cluster has no server defined")
+)
+
+type errContextNotFound struct {
+ ContextName string
+}
+
+func (e *errContextNotFound) Error() string {
+ return fmt.Sprintf("context was not found for specified context: %v", e.ContextName)
+}
+
+// IsContextNotFound returns a boolean indicating whether the error is known to
+// report that a context was not found
+func IsContextNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext {
+ return true
+ }
+ return strings.Contains(err.Error(), "context was not found for specified context")
+}
+
+// IsEmptyConfig returns true if the provided error indicates the provided configuration
+// is empty.
+func IsEmptyConfig(err error) bool {
+ switch t := err.(type) {
+ case errConfigurationInvalid:
+ return len(t) == 1 && t[0] == ErrEmptyConfig
+ }
+ return err == ErrEmptyConfig
+}
+
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+// errConfigurationInvalid implements error and Aggregate
+var _ error = errConfigurationInvalid{}
+var _ utilerrors.Aggregate = errConfigurationInvalid{}
+
+func newErrConfigurationInvalid(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ default:
+ return errConfigurationInvalid(errs)
+ }
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+ return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
+}
+
+// Errors implements the AggregateError interface
+func (e errConfigurationInvalid) Errors() []error {
+ return e
+}
+
+// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid.
+func IsConfigurationInvalid(err error) bool {
+ switch err.(type) {
+ case *errContextNotFound, errConfigurationInvalid:
+ return true
+ }
+ return IsContextNotFound(err)
+}
+
+// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible.
+func Validate(config clientcmdapi.Config) error {
+ validationErrors := make([]error, 0)
+
+ if clientcmdapi.IsConfigEmpty(&config) {
+ return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+ }
+
+ if len(config.CurrentContext) != 0 {
+ if _, exists := config.Contexts[config.CurrentContext]; !exists {
+ validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext})
+ }
+ }
+
+ for contextName, context := range config.Contexts {
+ validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+ }
+
+ for authInfoName, authInfo := range config.AuthInfos {
+ validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...)
+ }
+
+ for clusterName, clusterInfo := range config.Clusters {
+ validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...)
+ }
+
+ return newErrConfigurationInvalid(validationErrors)
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
+ validationErrors := make([]error, 0)
+
+ if clientcmdapi.IsConfigEmpty(&config) {
+ return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+ }
+
+ var contextName string
+ if len(passedContextName) != 0 {
+ contextName = passedContextName
+ } else {
+ contextName = config.CurrentContext
+ }
+
+ if len(contextName) == 0 {
+ return ErrNoContext
+ }
+
+ context, exists := config.Contexts[contextName]
+ if !exists {
+ validationErrors = append(validationErrors, &errContextNotFound{contextName})
+ }
+
+ if exists {
+ validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+ validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...)
+ validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
+ }
+
+ return newErrConfigurationInvalid(validationErrors)
+}
+
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error {
+ validationErrors := make([]error, 0)
+
+ if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) {
+ return []error{ErrEmptyCluster}
+ }
+
+ if len(clusterInfo.Server) == 0 {
+ if len(clusterName) == 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
+ } else {
+ validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
+ }
+ }
+ // Make sure CA data and CA file aren't both specified
+ if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName))
+ }
+ if len(clusterInfo.CertificateAuthority) != 0 {
+ clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
+ defer clientCertCA.Close()
+ if err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ }
+ }
+
+ return validationErrors
+}
+
+// validateAuthInfo looks for conflicts and errors in the auth info
+func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error {
+ validationErrors := make([]error, 0)
+
+ usingAuthPath := false
+ methods := make([]string, 0, 3)
+ if len(authInfo.Token) != 0 {
+ methods = append(methods, "token")
+ }
+ if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
+ methods = append(methods, "basicAuth")
+ }
+
+ if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
+ // Make sure cert data and file aren't both specified
+ if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName))
+ }
+ // Make sure key data and file aren't both specified
+ if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+ }
+ // Make sure a key is specified
+ if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName))
+ }
+
+ if len(authInfo.ClientCertificate) != 0 {
+ clientCertFile, err := os.Open(authInfo.ClientCertificate)
+ defer clientCertFile.Close()
+ if err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ }
+ }
+ if len(authInfo.ClientKey) != 0 {
+ clientKeyFile, err := os.Open(authInfo.ClientKey)
+ defer clientKeyFile.Close()
+ if err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ }
+ }
+ }
+
+ // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+ if (len(methods) > 1) && (!usingAuthPath) {
+ validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+ }
+
+ return validationErrors
+}
+
+// validateContext looks for errors in the context. It is not transitive, so errors in the referenced authInfo or cluster configs are not included in this return
+func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error {
+ validationErrors := make([]error, 0)
+
+ if len(context.AuthInfo) == 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName))
+ } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists {
+ validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName))
+ }
+
+ if len(context.Cluster) == 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName))
+ } else if _, exists := config.Clusters[context.Cluster]; !exists {
+ validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName))
+ }
+
+ if len(context.Namespace) != 0 {
+ if len(validation.IsDNS1123Label(context.Namespace)) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName))
+ }
+ }
+
+ return validationErrors
+}
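
A brief sketch of running these checks over a freshly loaded config, assuming the vendored import path in this tree.

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
    )

    func main() {
    	rules := clientcmd.NewDefaultClientConfigLoadingRules()
    	config, err := rules.Load()
    	if err != nil {
    		panic(err)
    	}

    	// Validate walks the whole config and aggregates every problem it finds.
    	if err := clientcmd.Validate(*config); err != nil {
    		fmt.Println("config problems:", err)
    	}

    	// ConfirmUsable only checks the pieces reachable from one context.
    	if err := clientcmd.ConfirmUsable(*config, config.CurrentContext); err != nil {
    		fmt.Println("current context not usable:", err)
    	}
    }
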
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go
new file mode 100644
index 0000000..fa9cad9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ClusterRoleBindings has methods to work with ClusterRoleBinding resources
+type ClusterRoleBindings interface {
+ ClusterRoleBindings() ClusterRoleBindingInterface
+}
+
+// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
+type ClusterRoleBindingInterface interface {
+ List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error)
+ Get(name string) (*rbac.ClusterRoleBinding, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error)
+ Update(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// clusterRoleBindings implements the ClusterRoleBindingInterface
+type clusterRoleBindings struct {
+ client *RbacClient
+}
+
+// newClusterRoleBindings returns a clusterRoleBindings
+func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings {
+ return &clusterRoleBindings{
+ client: c,
+ }
+}
+
+// List takes label and field selectors, and returns the list of clusterRoleBindings that match those selectors.
+func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) {
+ result = &rbac.ClusterRoleBindingList{}
+ err = c.client.Get().Resource("clusterrolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the clusterRoleBinding, and returns the corresponding ClusterRoleBinding object, and an error if one occurs
+func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) {
+ result = &rbac.ClusterRoleBinding{}
+ err = c.client.Get().Resource("clusterrolebindings").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Resource("clusterrolebindings").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if it occurs.
+func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) {
+ result = &rbac.ClusterRoleBinding{}
+ err = c.client.Post().Resource("clusterrolebindings").Body(clusterRoleBinding).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if it occurs.
+func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) {
+ result = &rbac.ClusterRoleBinding{}
+ err = c.client.Put().Resource("clusterrolebindings").Name(clusterRoleBinding.Name).Body(clusterRoleBinding).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Resource("clusterrolebindings").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
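For orientation, a brief usage sketch against the interfaces declared in this file; printClusterRoleBindings is a hypothetical helper and assumes a "fmt" import in addition to the imports shown above:

    func printClusterRoleBindings(c ClusterRoleBindings) error {
        bindings, err := c.ClusterRoleBindings().List(api.ListOptions{})
        if err != nil {
            return err
        }
        for _, b := range bindings.Items {
            fmt.Println(b.Name)
        }
        return nil
    }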
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go
new file mode 100644
index 0000000..165271a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ClusterRoles has methods to work with cluster-scoped ClusterRole resources.
+type ClusterRoles interface {
+ ClusterRoles() ClusterRoleInterface
+}
+
+// ClusterRoleInterface has methods to work with ClusterRole resources.
+type ClusterRoleInterface interface {
+ List(opts api.ListOptions) (*rbac.ClusterRoleList, error)
+ Get(name string) (*rbac.ClusterRole, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error)
+ Update(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// clusterRoles implements ClusterRoleInterface
+type clusterRoles struct {
+ client *RbacClient
+}
+
+// newClusterRoles returns a clusterRoles
+func newClusterRoles(c *RbacClient) *clusterRoles {
+ return &clusterRoles{
+ client: c,
+ }
+}
+
+// List takes label and field selectors, and returns the list of clusterRoles that match those selectors.
+func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) {
+ result = &rbac.ClusterRoleList{}
+ err = c.client.Get().Resource("clusterroles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the clusterRole, and returns the corresponding ClusterRole object, and an error if one occurs
+func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) {
+ result = &rbac.ClusterRole{}
+ err = c.client.Get().Resource("clusterroles").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Resource("clusterroles").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if it occurs.
+func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) {
+ result = &rbac.ClusterRole{}
+ err = c.client.Post().Resource("clusterroles").Body(clusterRole).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if it occurs.
+func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) {
+ result = &rbac.ClusterRole{}
+ err = c.client.Put().Resource("clusterroles").Name(clusterRole.Name).Body(clusterRole).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Resource("clusterroles").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
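A similar sketch for the ClusterRole accessors, this time showing a read-modify-write cycle; labelClusterRole and the "audited" label are illustrative, not part of the vendored file:

    func labelClusterRole(c ClusterRoles, name string) error {
        role, err := c.ClusterRoles().Get(name)
        if err != nil {
            return err
        }
        if role.Labels == nil {
            role.Labels = map[string]string{}
        }
        role.Labels["audited"] = "true"
        _, err = c.ClusterRoles().Update(role)
        return err
    }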
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go
new file mode 100644
index 0000000..aca996b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+)
+
+type ComponentStatusesInterface interface {
+ ComponentStatuses() ComponentStatusInterface
+}
+
+// ComponentStatusInterface contains methods to retrieve ComponentStatus
+type ComponentStatusInterface interface {
+ List(opts api.ListOptions) (*api.ComponentStatusList, error)
+ Get(name string) (*api.ComponentStatus, error)
+
+ // TODO: It'd be nice to have watch support at some point
+ //Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// componentStatuses implements ComponentStatusInterface
+type componentStatuses struct {
+ client *Client
+}
+
+func newComponentStatuses(c *Client) *componentStatuses {
+ return &componentStatuses{c}
+}
+
+func (c *componentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) {
+ result = &api.ComponentStatusList{}
+ err = c.client.Get().
+ Resource("componentStatuses").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) {
+ result = &api.ComponentStatus{}
+ err = c.client.Get().Resource("componentStatuses").Name(name).Do().Into(result)
+ return
+}
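As a usage sketch, component health could be dumped through the interface above; printComponentHealth is hypothetical and assumes a "fmt" import:

    func printComponentHealth(c ComponentStatusesInterface) error {
        statuses, err := c.ComponentStatuses().List(api.ListOptions{})
        if err != nil {
            return err
        }
        for _, cs := range statuses.Items {
            for _, cond := range cs.Conditions {
                fmt.Printf("%s: %s=%s\n", cs.Name, cond.Type, cond.Status)
            }
        }
        return nil
    }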
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go
new file mode 100644
index 0000000..5c28429
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/util/wait"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ControllerHasDesiredReplicas returns a condition that will be true if and only if
+// the desired replica count for a controller's ReplicaSelector equals the Replicas count.
+func ControllerHasDesiredReplicas(c Interface, controller *api.ReplicationController) wait.ConditionFunc {
+
+ // If we're given a controller where the status lags the spec, it either means that the controller is stale,
+ // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
+ desiredGeneration := controller.Generation
+
+ return func() (bool, error) {
+ ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name)
+ if err != nil {
+ return false, err
+ }
+ // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass,
+ // or, after this check has passed, a modification causes the rc manager to create more pods.
+ // This will not be an issue once we've implemented graceful delete for rcs, but till then
+ // concurrent stop operations on the same rc might have unintended side effects.
+ return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == ctrl.Spec.Replicas, nil
+ }
+}
+
+// ReplicaSetHasDesiredReplicas returns a condition that will be true if and only if
+// the desired replica count for a ReplicaSet's ReplicaSelector equals the Replicas count.
+func ReplicaSetHasDesiredReplicas(c ExtensionsInterface, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
+
+ // If we're given a ReplicaSet where the status lags the spec, it either means that the
+ // ReplicaSet is stale, or that the ReplicaSet manager hasn't noticed the update yet.
+ // Polling status.Replicas is not safe in the latter case.
+ desiredGeneration := replicaSet.Generation
+
+ return func() (bool, error) {
+ rs, err := c.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name)
+ if err != nil {
+ return false, err
+ }
+ // There's a chance a concurrent update modifies the Spec.Replicas causing this check to
+ // pass, or, after this check has passed, a modification causes the ReplicaSet manager to
+ // create more pods. This will not be an issue once we've implemented graceful delete for
+ // ReplicaSets, but till then concurrent stop operations on the same ReplicaSet might have
+ // unintended side effects.
+ return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == rs.Spec.Replicas, nil
+ }
+}
+
+// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count
+// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count.
+func JobHasDesiredParallelism(c BatchInterface, job *batch.Job) wait.ConditionFunc {
+
+ return func() (bool, error) {
+ job, err := c.Jobs(job.Namespace).Get(job.Name)
+ if err != nil {
+ return false, err
+ }
+
+ // If the number of active pods already equals the desired parallelism, the condition is met.
+ if job.Status.Active == *job.Spec.Parallelism {
+ return true, nil
+ }
+ if job.Spec.Completions == nil {
+ // A job without specified completions needs to wait for Active to reach Parallelism.
+ return false, nil
+ } else {
+ // otherwise count successful
+ progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded
+ return progress == 0, nil
+ }
+ }
+}
+
+// DeploymentHasDesiredReplicas returns a condition that will be true if and only if
+// the desired replica count for a deployment equals its updated replicas count.
+// (non-terminated pods that have the desired template spec).
+func DeploymentHasDesiredReplicas(c ExtensionsInterface, deployment *extensions.Deployment) wait.ConditionFunc {
+ // If we're given a deployment where the status lags the spec, it either
+ // means that the deployment is stale, or that the deployment manager hasn't
+ // noticed the update yet. Polling status.Replicas is not safe in the latter
+ // case.
+ desiredGeneration := deployment.Generation
+
+ return func() (bool, error) {
+ deployment, err := c.Deployments(deployment.Namespace).Get(deployment.Name)
+ if err != nil {
+ return false, err
+ }
+ return deployment.Status.ObservedGeneration >= desiredGeneration &&
+ deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil
+ }
+}
+
+// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
+// the pod has already reached completed state.
+var ErrPodCompleted = fmt.Errorf("pod ran to completion")
+
+// PodRunning returns true if the pod is running, false if the pod has not yet reached running state,
+// returns ErrPodCompleted if the pod has run to completion, or an error in any other case.
+func PodRunning(event watch.Event) (bool, error) {
+ switch event.Type {
+ case watch.Deleted:
+ return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "")
+ }
+ switch t := event.Object.(type) {
+ case *api.Pod:
+ switch t.Status.Phase {
+ case api.PodRunning:
+ return true, nil
+ case api.PodFailed, api.PodSucceeded:
+ return false, ErrPodCompleted
+ }
+ }
+ return false, nil
+}
+
+// PodCompleted returns true if the pod has run to completion, false if the pod has not yet
+// reached running state, or an error in any other case.
+func PodCompleted(event watch.Event) (bool, error) {
+ switch event.Type {
+ case watch.Deleted:
+ return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "")
+ }
+ switch t := event.Object.(type) {
+ case *api.Pod:
+ switch t.Status.Phase {
+ case api.PodFailed, api.PodSucceeded:
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not
+// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or
+// an error in any other case.
+func PodRunningAndReady(event watch.Event) (bool, error) {
+ switch event.Type {
+ case watch.Deleted:
+ return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "")
+ }
+ switch t := event.Object.(type) {
+ case *api.Pod:
+ switch t.Status.Phase {
+ case api.PodFailed, api.PodSucceeded:
+ return false, ErrPodCompleted
+ case api.PodRunning:
+ return api.IsPodReady(t), nil
+ }
+ }
+ return false, nil
+}
+
+// PodNotPending returns true if the pod has left the pending state, false if it has not,
+// or an error in any other case (such as if the pod was deleted).
+func PodNotPending(event watch.Event) (bool, error) {
+ switch event.Type {
+ case watch.Deleted:
+ return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "")
+ }
+ switch t := event.Object.(type) {
+ case *api.Pod:
+ switch t.Status.Phase {
+ case api.PodPending:
+ return false, nil
+ default:
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// PodContainerRunning returns false until the named container has ContainerStatus running (at least once),
+// and will return an error if the pod is deleted, runs to completion, or the pod is otherwise not available.
+func PodContainerRunning(containerName string) watch.ConditionFunc {
+ return func(event watch.Event) (bool, error) {
+ switch event.Type {
+ case watch.Deleted:
+ return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "")
+ }
+ switch t := event.Object.(type) {
+ case *api.Pod:
+ switch t.Status.Phase {
+ case api.PodRunning, api.PodPending:
+ case api.PodFailed, api.PodSucceeded:
+ return false, ErrPodCompleted
+ default:
+ return false, nil
+ }
+ for _, s := range t.Status.ContainerStatuses {
+ if s.Name != containerName {
+ continue
+ }
+ return s.State.Running != nil, nil
+ }
+ return false, nil
+ }
+ return false, nil
+ }
+}
+
+// ServiceAccountHasSecrets returns true if the service account has at least one secret,
+// false if it does not, or an error.
+func ServiceAccountHasSecrets(event watch.Event) (bool, error) {
+ switch event.Type {
+ case watch.Deleted:
+ return false, errors.NewNotFound(unversioned.GroupResource{Resource: "serviceaccounts"}, "")
+ }
+ switch t := event.Object.(type) {
+ case *api.ServiceAccount:
+ return len(t.Secrets) > 0, nil
+ }
+ return false, nil
+}
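These helpers return wait.ConditionFunc values, so they are normally combined with the polling helpers in pkg/util/wait; a minimal sketch follows (waitForRC is a hypothetical name, the interval and timeout are arbitrary, and a "time" import is assumed):

    func waitForRC(c Interface, rc *api.ReplicationController) error {
        // Poll every two seconds, give up after five minutes.
        return wait.Poll(2*time.Second, 5*time.Minute, ControllerHasDesiredReplicas(c, rc))
    }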
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go
new file mode 100644
index 0000000..c2f2035
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+const (
+ ConfigMapResourceName string = "configmaps"
+)
+
+type ConfigMapsNamespacer interface {
+ ConfigMaps(namespace string) ConfigMapsInterface
+}
+
+type ConfigMapsInterface interface {
+ Get(string) (*api.ConfigMap, error)
+ List(opts api.ListOptions) (*api.ConfigMapList, error)
+ Create(*api.ConfigMap) (*api.ConfigMap, error)
+ Delete(string) error
+ Update(*api.ConfigMap) (*api.ConfigMap, error)
+ Watch(api.ListOptions) (watch.Interface, error)
+}
+
+type ConfigMaps struct {
+ client *Client
+ namespace string
+}
+
+// ConfigMaps should implement ConfigMapsInterface
+var _ ConfigMapsInterface = &ConfigMaps{}
+
+func newConfigMaps(c *Client, ns string) *ConfigMaps {
+ return &ConfigMaps{
+ client: c,
+ namespace: ns,
+ }
+}
+
+func (c *ConfigMaps) Get(name string) (*api.ConfigMap, error) {
+ result := &api.ConfigMap{}
+ err := c.client.Get().
+ Namespace(c.namespace).
+ Resource(ConfigMapResourceName).
+ Name(name).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *ConfigMaps) List(opts api.ListOptions) (*api.ConfigMapList, error) {
+ result := &api.ConfigMapList{}
+ err := c.client.Get().
+ Namespace(c.namespace).
+ Resource(ConfigMapResourceName).
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *ConfigMaps) Create(cfg *api.ConfigMap) (*api.ConfigMap, error) {
+ result := &api.ConfigMap{}
+ err := c.client.Post().
+ Namespace(c.namespace).
+ Resource(ConfigMapResourceName).
+ Body(cfg).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *ConfigMaps) Delete(name string) error {
+ return c.client.Delete().
+ Namespace(c.namespace).
+ Resource(ConfigMapResourceName).
+ Name(name).
+ Do().
+ Error()
+}
+
+func (c *ConfigMaps) Update(cfg *api.ConfigMap) (*api.ConfigMap, error) {
+ result := &api.ConfigMap{}
+
+ err := c.client.Put().
+ Namespace(c.namespace).
+ Resource(ConfigMapResourceName).
+ Name(cfg.Name).
+ Body(cfg).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *ConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.namespace).
+ Resource(ConfigMapResourceName).
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
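A short sketch of a read-modify-write against this ConfigMap client; bumpLogLevel, the ConfigMap name, and the key/value pair are illustrative:

    func bumpLogLevel(c ConfigMapsNamespacer, namespace string) error {
        cm, err := c.ConfigMaps(namespace).Get("app-settings")
        if err != nil {
            return err
        }
        if cm.Data == nil {
            cm.Data = map[string]string{}
        }
        cm.Data["log-level"] = "debug"
        _, err = c.ConfigMaps(namespace).Update(cm)
        return err
    }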
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go
new file mode 100644
index 0000000..2f9aae8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+
+ cadvisorapi "github.com/google/cadvisor/info/v1"
+)
+
+type ContainerInfoGetter interface {
+ // GetContainerInfo returns information about a container.
+ GetContainerInfo(host, podID, containerID string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
+ // GetRootInfo returns information about the root container on a machine.
+ GetRootInfo(host string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
+ // GetMachineInfo returns the machine's information, such as the number of cores and memory capacity.
+ GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error)
+}
+
+type HTTPContainerInfoGetter struct {
+ Client *http.Client
+ Port int
+}
+
+func (self *HTTPContainerInfoGetter) GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error) {
+ request, err := http.NewRequest(
+ "GET",
+ fmt.Sprintf("http://%v/spec",
+ net.JoinHostPort(host, strconv.Itoa(self.Port)),
+ ),
+ nil,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := self.Client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("trying to get machine spec from %v; received status %v",
+ host, response.Status)
+ }
+ var minfo cadvisorapi.MachineInfo
+ err = json.NewDecoder(response.Body).Decode(&minfo)
+ if err != nil {
+ return nil, err
+ }
+ return &minfo, nil
+}
+
+func (self *HTTPContainerInfoGetter) getContainerInfo(host, path string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+ var body io.Reader
+ if req != nil {
+ content, err := json.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+ body = bytes.NewBuffer(content)
+ }
+
+ request, err := http.NewRequest(
+ "GET",
+ fmt.Sprintf("http://%v/stats/%v",
+ net.JoinHostPort(host, strconv.Itoa(self.Port)),
+ path,
+ ),
+ body,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := self.Client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("trying to get info for %v from %v; received status %v",
+ path, host, response.Status)
+ }
+ var cinfo cadvisorapi.ContainerInfo
+ err = json.NewDecoder(response.Body).Decode(&cinfo)
+ if err != nil {
+ return nil, err
+ }
+ return &cinfo, nil
+}
+
+func (self *HTTPContainerInfoGetter) GetContainerInfo(host, podID, containerID string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+ return self.getContainerInfo(
+ host,
+ fmt.Sprintf("%v/%v", podID, containerID),
+ req,
+ )
+}
+
+func (self *HTTPContainerInfoGetter) GetRootInfo(host string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+ return self.getContainerInfo(host, "", req)
+}
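A sketch of how the getter above might be pointed at a kubelet; printMachineSpec is hypothetical, the port is only an example of a read-only kubelet port, and "fmt"/"time" imports are assumed:

    func printMachineSpec(host string) error {
        getter := &HTTPContainerInfoGetter{
            Client: &http.Client{Timeout: 10 * time.Second},
            Port:   10255, // illustrative read-only port
        }
        machine, err := getter.GetMachineInfo(host)
        if err != nil {
            return err
        }
        fmt.Printf("cores=%d memory=%d\n", machine.NumCores, machine.MemoryCapacity)
        return nil
    }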
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go
new file mode 100644
index 0000000..7ec9182
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// DaemonSetsNamespacer has methods to work with DaemonSet resources in a namespace
+type DaemonSetsNamespacer interface {
+ DaemonSets(namespace string) DaemonSetInterface
+}
+
+type DaemonSetInterface interface {
+ List(opts api.ListOptions) (*extensions.DaemonSetList, error)
+ Get(name string) (*extensions.DaemonSet, error)
+ Create(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error)
+ Update(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error)
+ UpdateStatus(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error)
+ Delete(name string) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// daemonSets implements DaemonSetInterface
+type daemonSets struct {
+ r *ExtensionsClient
+ ns string
+}
+
+func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets {
+ return &daemonSets{c, namespace}
+}
+
+// Ensure statically that daemonSets implements DaemonSetInterface.
+var _ DaemonSetInterface = &daemonSets{}
+
+func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) {
+ result = &extensions.DaemonSetList{}
+ err = c.r.Get().Namespace(c.ns).Resource("daemonsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular daemon set.
+func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) {
+ result = &extensions.DaemonSet{}
+ err = c.r.Get().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new daemon set.
+func (c *daemonSets) Create(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) {
+ result = &extensions.DaemonSet{}
+ err = c.r.Post().Namespace(c.ns).Resource("daemonsets").Body(daemon).Do().Into(result)
+ return
+}
+
+// Update updates an existing daemon set.
+func (c *daemonSets) Update(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) {
+ result = &extensions.DaemonSet{}
+ err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).Body(daemon).Do().Into(result)
+ return
+}
+
+// UpdateStatus updates an existing daemon set status
+func (c *daemonSets) UpdateStatus(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) {
+ result = &extensions.DaemonSet{}
+ err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).SubResource("status").Body(daemon).Do().Into(result)
+ return
+}
+
+// Delete deletes an existing daemon set.
+func (c *daemonSets) Delete(name string) error {
+ return c.r.Delete().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested daemon sets.
+func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("daemonsets").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
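For orientation, a small read-only sketch against the namespacer above; daemonSetSummary is a hypothetical helper and assumes a "fmt" import:

    func daemonSetSummary(c DaemonSetsNamespacer, namespace, name string) error {
        ds, err := c.DaemonSets(namespace).Get(name)
        if err != nil {
            return err
        }
        fmt.Printf("%s: desired=%d current=%d\n", ds.Name,
            ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled)
        return nil
    }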
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go
new file mode 100644
index 0000000..a5e8afe
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// DeploymentsNamespacer has methods to work with Deployment resources in a namespace
+type DeploymentsNamespacer interface {
+ Deployments(namespace string) DeploymentInterface
+}
+
+// DeploymentInterface has methods to work with Deployment resources.
+type DeploymentInterface interface {
+ List(opts api.ListOptions) (*extensions.DeploymentList, error)
+ Get(name string) (*extensions.Deployment, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(*extensions.Deployment) (*extensions.Deployment, error)
+ Update(*extensions.Deployment) (*extensions.Deployment, error)
+ UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ Rollback(*extensions.DeploymentRollback) error
+}
+
+// deployments implements DeploymentInterface
+type deployments struct {
+ client *ExtensionsClient
+ ns string
+}
+
+// Ensure statically that deployments implements DeploymentInterface.
+var _ DeploymentInterface = &deployments{}
+
+// newDeployments returns a Deployments
+func newDeployments(c *ExtensionsClient, namespace string) *deployments {
+ return &deployments{
+ client: c,
+ ns: namespace,
+ }
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) {
+ result = &extensions.DeploymentList{}
+ err = c.client.Get().Namespace(c.ns).Resource("deployments").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *deployments) Get(name string) (result *extensions.Deployment, err error) {
+ result = &extensions.Deployment{}
+ err = c.client.Get().Namespace(c.ns).Resource("deployments").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *deployments) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Namespace(c.ns).Resource("deployments").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) {
+ result = &extensions.Deployment{}
+ err = c.client.Post().Namespace(c.ns).Resource("deployments").Body(deployment).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) {
+ result = &extensions.Deployment{}
+ err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).Body(deployment).Do().Into(result)
+ return
+}
+
+func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) {
+ result = &extensions.Deployment{}
+ err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).SubResource("status").Body(deployment).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("deployments").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace.
+func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error {
+ return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error()
+}
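The Rollback call is the least obvious of these methods; a hedged sketch of driving it (rollbackDeployment is hypothetical, and the field names follow the extensions.DeploymentRollback type as understood here):

    func rollbackDeployment(c DeploymentsNamespacer, namespace, name string, revision int64) error {
        return c.Deployments(namespace).Rollback(&extensions.DeploymentRollback{
            Name:       name,
            RollbackTo: extensions.RollbackConfig{Revision: revision},
        })
    }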
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go
new file mode 100644
index 0000000..dac3925
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package unversioned contains the implementation of the client side communication with the
+Kubernetes master. The Client type provides methods for reading, creating, updating,
+and deleting pods, replication controllers, daemons, services, and nodes.
+
+Most consumers should use the Config object to create a Client:
+
+ import (
+ client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/api"
+ )
+
+ [...]
+
+ config := &client.Config{
+ Host: "http://localhost:8080",
+ Username: "test",
+ Password: "password",
+ }
+ client, err := client.New(config)
+ if err != nil {
+ // handle error
+ }
+ pods, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{})
+ if err != nil {
+ // handle error
+ }
+
+More advanced consumers may wish to provide their own transport via an http.RoundTripper:
+
+ config := &client.Config{
+ Host: "https://localhost:8080",
+ Transport: oauthclient.Transport(),
+ }
+ client, err := client.New(config)
+
+The RESTClient type implements the Kubernetes API conventions (see `docs/devel/api-conventions.md`)
+for a given API path and is intended for use by consumers implementing their own Kubernetes
+compatible APIs.
+*/
+package unversioned
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go
new file mode 100644
index 0000000..6e20a34
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// EndpointsNamespacer has methods to work with Endpoints resources in a namespace
+type EndpointsNamespacer interface {
+ Endpoints(namespace string) EndpointsInterface
+}
+
+// EndpointsInterface has methods to work with Endpoints resources
+type EndpointsInterface interface {
+ Create(endpoints *api.Endpoints) (*api.Endpoints, error)
+ List(opts api.ListOptions) (*api.EndpointsList, error)
+ Get(name string) (*api.Endpoints, error)
+ Delete(name string) error
+ Update(endpoints *api.Endpoints) (*api.Endpoints, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// endpoints implements EndpointsInterface
+type endpoints struct {
+ r *Client
+ ns string
+}
+
+// newEndpoints returns an endpoints
+func newEndpoints(c *Client, namespace string) *endpoints {
+ return &endpoints{c, namespace}
+}
+
+// Create creates a new endpoint.
+func (c *endpoints) Create(endpoints *api.Endpoints) (*api.Endpoints, error) {
+ result := &api.Endpoints{}
+ err := c.r.Post().Namespace(c.ns).Resource("endpoints").Body(endpoints).Do().Into(result)
+ return result, err
+}
+
+// List takes a selector, and returns the list of endpoints that match that selector
+func (c *endpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) {
+ result = &api.EndpointsList{}
+ err = c.r.Get().
+ Namespace(c.ns).
+ Resource("endpoints").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Get returns information about the endpoints for a particular service.
+func (c *endpoints) Get(name string) (result *api.Endpoints, err error) {
+ result = &api.Endpoints{}
+ err = c.r.Get().Namespace(c.ns).Resource("endpoints").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the endpoint, and returns an error if one occurs
+func (c *endpoints) Delete(name string) error {
+ return c.r.Delete().Namespace(c.ns).Resource("endpoints").Name(name).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested endpoints for a service.
+func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("endpoints").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+func (c *endpoints) Update(endpoints *api.Endpoints) (*api.Endpoints, error) {
+ result := &api.Endpoints{}
+ err := c.r.Put().
+ Namespace(c.ns).
+ Resource("endpoints").
+ Name(endpoints.Name).
+ Body(endpoints).
+ Do().
+ Into(result)
+ return result, err
+}
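A sketch of consuming the Watch stream returned above; watchEndpoints is a hypothetical helper and assumes a "fmt" import:

    func watchEndpoints(c EndpointsNamespacer, namespace string) error {
        w, err := c.Endpoints(namespace).Watch(api.ListOptions{})
        if err != nil {
            return err
        }
        defer w.Stop()
        for event := range w.ResultChan() {
            if ep, ok := event.Object.(*api.Endpoints); ok {
                fmt.Println(event.Type, ep.Name)
            }
        }
        return nil
    }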
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go
new file mode 100644
index 0000000..3421bd8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go
@@ -0,0 +1,219 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// EventNamespacer can return an EventInterface for the given namespace.
+type EventNamespacer interface {
+ Events(namespace string) EventInterface
+}
+
+// EventInterface has methods to work with Event resources
+type EventInterface interface {
+ Create(event *api.Event) (*api.Event, error)
+ Update(event *api.Event) (*api.Event, error)
+ Patch(event *api.Event, data []byte) (*api.Event, error)
+ List(opts api.ListOptions) (*api.EventList, error)
+ Get(name string) (*api.Event, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ // Search finds events about the specified object
+ Search(objOrRef runtime.Object) (*api.EventList, error)
+ Delete(name string) error
+ // DeleteCollection deletes a collection of events.
+ DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
+ // Returns the appropriate field selector based on the API version being used to communicate with the server.
+ // The returned field selector can be used with List and Watch to filter desired events.
+ GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector
+}
+
+// events implements EventInterface
+type events struct {
+ client *Client
+ namespace string
+}
+
+// newEvents returns a new events object.
+func newEvents(c *Client, ns string) *events {
+ return &events{
+ client: c,
+ namespace: ns,
+ }
+}
+
+// Create makes a new event. Returns the copy of the event the server returns,
+// or an error. The namespace to create the event within is deduced from the
+// event; it must either match this event client's namespace, or this event
+// client must have been created with the "" namespace.
+func (e *events) Create(event *api.Event) (*api.Event, error) {
+ if e.namespace != "" && event.Namespace != e.namespace {
+ return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.namespace)
+ }
+ result := &api.Event{}
+ err := e.client.Post().
+ Namespace(event.Namespace).
+ Resource("events").
+ Body(event).
+ Do().
+ Into(result)
+ return result, err
+}
+
+// Update modifies an existing event. It returns the copy of the event that the server returns,
+// or an error. The namespace and key to update the event within is deduced from the event. The
+// namespace must either match this event client's namespace, or this event client must have been
+// created with the "" namespace. Update also requires the ResourceVersion to be set in the event
+// object.
+func (e *events) Update(event *api.Event) (*api.Event, error) {
+ result := &api.Event{}
+ err := e.client.Put().
+ Namespace(event.Namespace).
+ Resource("events").
+ Name(event.Name).
+ Body(event).
+ Do().
+ Into(result)
+ return result, err
+}
+
+// Patch modifies an existing event. It returns the copy of the event that the server returns, or an
+// error. The namespace and name of the target event is deduced from the incompleteEvent. The
+// namespace must either match this event client's namespace, or this event client must have been
+// created with the "" namespace.
+func (e *events) Patch(incompleteEvent *api.Event, data []byte) (*api.Event, error) {
+ result := &api.Event{}
+ err := e.client.Patch(api.StrategicMergePatchType).
+ Namespace(incompleteEvent.Namespace).
+ Resource("events").
+ Name(incompleteEvent.Name).
+ Body(data).
+ Do().
+ Into(result)
+ return result, err
+}
+
+// List returns a list of events matching the selectors.
+func (e *events) List(opts api.ListOptions) (*api.EventList, error) {
+ result := &api.EventList{}
+ err := e.client.Get().
+ Namespace(e.namespace).
+ Resource("events").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+ return result, err
+}
+
+// Get returns the given event, or an error.
+func (e *events) Get(name string) (*api.Event, error) {
+ result := &api.Event{}
+ err := e.client.Get().
+ Namespace(e.namespace).
+ Resource("events").
+ Name(name).
+ Do().
+ Into(result)
+ return result, err
+}
+
+// Watch starts watching for events matching the given selectors.
+func (e *events) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return e.client.Get().
+ Prefix("watch").
+ Namespace(e.namespace).
+ Resource("events").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// Search finds events about the specified object. The namespace of the
+// object must match this event's client namespace unless the event client
+// was made with the "" namespace.
+func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) {
+ ref, err := api.GetReference(objOrRef)
+ if err != nil {
+ return nil, err
+ }
+ if e.namespace != "" && ref.Namespace != e.namespace {
+ return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.namespace)
+ }
+ stringRefKind := string(ref.Kind)
+ var refKind *string
+ if stringRefKind != "" {
+ refKind = &stringRefKind
+ }
+ stringRefUID := string(ref.UID)
+ var refUID *string
+ if stringRefUID != "" {
+ refUID = &stringRefUID
+ }
+ fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
+ return e.List(api.ListOptions{FieldSelector: fieldSelector})
+}
+
+// Delete deletes an existing event.
+func (e *events) Delete(name string) error {
+ return e.client.Delete().
+ Namespace(e.namespace).
+ Resource("events").
+ Name(name).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (e *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+ return e.client.Delete().
+ Namespace(e.namespace).
+ Resource("events").
+ VersionedParams(&listOptions, api.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Returns the appropriate field selector based on the API version being used to communicate with the server.
+// The returned field selector can be used with List and Watch to filter desired events.
+func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
+ apiVersion := e.client.APIVersion().String()
+ field := fields.Set{}
+ if involvedObjectName != nil {
+ field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName
+ }
+ if involvedObjectNamespace != nil {
+ field["involvedObject.namespace"] = *involvedObjectNamespace
+ }
+ if involvedObjectKind != nil {
+ field["involvedObject.kind"] = *involvedObjectKind
+ }
+ if involvedObjectUID != nil {
+ field["involvedObject.uid"] = *involvedObjectUID
+ }
+ return field.AsSelector()
+}
+
+// Returns the appropriate field label to use for name of the involved object as per the given API version.
+func GetInvolvedObjectNameFieldLabel(version string) string {
+ return "involvedObject.name"
+}
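Search is the convenience entry point here; a brief sketch (printPodEvents is hypothetical, the pod is assumed to come from the core Pods client, and a "fmt" import is assumed):

    func printPodEvents(c EventNamespacer, pod *api.Pod) error {
        evs, err := c.Events(pod.Namespace).Search(pod)
        if err != nil {
            return err
        }
        for _, ev := range evs.Items {
            fmt.Println(ev.Reason, ev.Message)
        }
        return nil
    }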
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go
new file mode 100644
index 0000000..39b3408
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go
@@ -0,0 +1,131 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+// ExtensionsInterface holds the experimental methods for clients of Kubernetes
+// to allow mock testing.
+// Features of Extensions group are not supported and may be changed or removed in
+// incompatible ways at any time.
+type ExtensionsInterface interface {
+ ScaleNamespacer
+ DaemonSetsNamespacer
+ DeploymentsNamespacer
+ JobsNamespacer
+ IngressNamespacer
+ NetworkPolicyNamespacer
+ ThirdPartyResourceNamespacer
+ ReplicaSetsNamespacer
+ PodSecurityPoliciesInterface
+}
+
+// ExtensionsClient is used to interact with experimental Kubernetes features.
+// Features of Extensions group are not supported and may be changed or removed in
+// incompatible ways at any time.
+type ExtensionsClient struct {
+ *restclient.RESTClient
+}
+
+func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface {
+ return newPodSecurityPolicy(c)
+}
+
+func (c *ExtensionsClient) Scales(namespace string) ScaleInterface {
+ return newScales(c, namespace)
+}
+
+func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface {
+ return newDaemonSets(c, namespace)
+}
+
+func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface {
+ return newDeployments(c, namespace)
+}
+
+func (c *ExtensionsClient) Jobs(namespace string) JobInterface {
+ return newJobs(c, namespace)
+}
+
+func (c *ExtensionsClient) Ingress(namespace string) IngressInterface {
+ return newIngress(c, namespace)
+}
+
+func (c *ExtensionsClient) NetworkPolicies(namespace string) NetworkPolicyInterface {
+ return newNetworkPolicies(c, namespace)
+}
+
+func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface {
+ return newThirdPartyResources(c)
+}
+
+func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface {
+ return newReplicaSets(c, namespace)
+}
+
+// NewExtensions creates a new ExtensionsClient for the given config. This client
+// provides access to experimental Kubernetes features.
+// Features of Extensions group are not supported and may be changed or removed in
+// incompatible ways at any time.
+func NewExtensions(c *restclient.Config) (*ExtensionsClient, error) {
+ config := *c
+ if err := setExtensionsDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &ExtensionsClient{client}, nil
+}
+
+// NewExtensionsOrDie creates a new ExtensionsClient for the given config and
+// panics if there is an error in the config.
+// Features of Extensions group are not supported and may be changed or removed in
+// incompatible ways at any time.
+func NewExtensionsOrDie(c *restclient.Config) *ExtensionsClient {
+ client, err := NewExtensions(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setExtensionsDefaults(config *restclient.Config) error {
+ // if experimental group is not registered, return an error
+ g, err := registered.Group(extensions.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ return nil
+}
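A minimal construction sketch using NewExtensions as defined above; countDeployments is a hypothetical helper:

    func countDeployments(cfg *restclient.Config) (int, error) {
        extClient, err := NewExtensions(cfg)
        if err != nil {
            return 0, err
        }
        deployments, err := extClient.Deployments(api.NamespaceDefault).List(api.ListOptions{})
        if err != nil {
            return 0, err
        }
        return len(deployments.Items), nil
    }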
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go
new file mode 100644
index 0000000..7d32a25
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "time"
+)
+
+// FlagSet abstracts the flag interface for compatibility with both Golang "flag"
+// and cobra pflags (Posix style).
+type FlagSet interface {
+ StringVar(p *string, name, value, usage string)
+ BoolVar(p *bool, name string, value bool, usage string)
+ UintVar(p *uint, name string, value uint, usage string)
+ DurationVar(p *time.Duration, name string, value time.Duration, usage string)
+ IntVar(p *int, name string, value int, usage string)
+}
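Since the method set mirrors the standard library, Go's own *flag.FlagSet appears to satisfy this interface; a compile-time assertion sketch (requires importing "flag"):

    var _ FlagSet = flag.CommandLine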
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go
new file mode 100644
index 0000000..8475769
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go
@@ -0,0 +1,273 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/apps"
+ "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/apis/certificates"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/apis/policy"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/client/typed/discovery"
+ "k8s.io/kubernetes/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/version"
+ // Import solely to initialize client auth plugins.
+ _ "k8s.io/kubernetes/plugin/pkg/client/auth"
+)
+
+const (
+ legacyAPIPath = "/api"
+ defaultAPIPath = "/apis"
+)
+
+// New creates a Kubernetes client for the given config. This client works with pods,
+// replication controllers, daemons, and services. It allows operations such as list, get, update
+// and delete on these objects. An error is returned if the provided configuration
+// is not valid.
+func New(c *restclient.Config) (*Client, error) {
+ config := *c
+ if err := SetKubernetesDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+
+ discoveryConfig := *c
+ discoveryClient, err := discovery.NewDiscoveryClientForConfig(&discoveryConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ var autoscalingClient *AutoscalingClient
+ if registered.IsRegistered(autoscaling.GroupName) {
+ autoscalingConfig := *c
+ autoscalingClient, err = NewAutoscaling(&autoscalingConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var batchClient *BatchClient
+ if registered.IsRegistered(batch.GroupName) {
+ batchConfig := *c
+ batchClient, err = NewBatch(&batchConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var extensionsClient *ExtensionsClient
+ if registered.IsRegistered(extensions.GroupName) {
+ extensionsConfig := *c
+ extensionsClient, err = NewExtensions(&extensionsConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+ var policyClient *PolicyClient
+ if registered.IsRegistered(policy.GroupName) {
+ policyConfig := *c
+ policyClient, err = NewPolicy(&policyConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+ var certsClient *CertificatesClient
+ if registered.IsRegistered(certificates.GroupName) {
+ certsConfig := *c
+ certsClient, err = NewCertificates(&certsConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var appsClient *AppsClient
+ if registered.IsRegistered(apps.GroupName) {
+ appsConfig := *c
+ appsClient, err = NewApps(&appsConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var rbacClient *RbacClient
+ if registered.IsRegistered(rbac.GroupName) {
+ rbacConfig := *c
+ rbacClient, err = NewRbac(&rbacConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &Client{RESTClient: client, AutoscalingClient: autoscalingClient, BatchClient: batchClient, CertificatesClient: certsClient, ExtensionsClient: extensionsClient, DiscoveryClient: discoveryClient, AppsClient: appsClient, PolicyClient: policyClient, RbacClient: rbacClient}, nil
+}
+
+// MatchesServerVersion queries the server to compare the build version
+// (git hash) of the client with the server's build version. It returns an error
+// if it fails to contact the server or if the versions are not an exact match.
+func MatchesServerVersion(client *Client, c *restclient.Config) error {
+ var err error
+ if client == nil {
+ client, err = New(c)
+ if err != nil {
+ return err
+ }
+ }
+ cVer := version.Get()
+ sVer, err := client.Discovery().ServerVersion()
+ if err != nil {
+ return fmt.Errorf("couldn't read version from server: %v\n", err)
+ }
+ // GitVersion includes GitCommit and GitTreeState, but best to be safe?
+ if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != sVer.GitTreeState {
+ return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, cVer)
+ }
+
+ return nil
+}
+
+// NegotiateVersion queries the server's supported API versions to find
+// a version that both client and server support.
+// - If no version is requested, try the registered client versions in order
+//   of preference.
+// - If a version is requested that is not the config default (i.e. it was
+//   explicitly requested via a command-line flag) and the server does not
+//   support it, print a warning to stderr and try the client's registered
+//   versions in order of preference.
+// - If the requested version is the config default and the server does not
+//   support it, return an error.
+func NegotiateVersion(client *Client, c *restclient.Config, requestedGV *unversioned.GroupVersion, clientRegisteredGVs []unversioned.GroupVersion) (*unversioned.GroupVersion, error) {
+ var err error
+ if client == nil {
+ client, err = New(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ clientVersions := sets.String{}
+ for _, gv := range clientRegisteredGVs {
+ clientVersions.Insert(gv.String())
+ }
+ groups, err := client.ServerGroups()
+ if err != nil {
+ // This is almost always a connection error, and higher level code should treat this as a generic error,
+ // not a negotiation specific error.
+ return nil, err
+ }
+ versions := unversioned.ExtractGroupVersions(groups)
+ serverVersions := sets.String{}
+ for _, v := range versions {
+ serverVersions.Insert(v)
+ }
+
+ // If no version requested, use config version (may also be empty).
+ // make a copy of the original so we don't risk mutating input here or in the returned value
+ var preferredGV *unversioned.GroupVersion
+ switch {
+ case requestedGV != nil:
+ t := *requestedGV
+ preferredGV = &t
+ case c.GroupVersion != nil:
+ t := *c.GroupVersion
+ preferredGV = &t
+ }
+
+ // If a version was explicitly requested, verify that both client and server support it.
+ // If the server does not support it, warn, but try to negotiate a lower version.
+ if preferredGV != nil {
+ if !clientVersions.Has(preferredGV.String()) {
+ return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", preferredGV, clientVersions)
+
+ }
+ // If the server supports no versions, then we should just use the preferredGV.
+ // This can happen when discovery fails because of 403 Forbidden errors.
+ if len(serverVersions) == 0 {
+ return preferredGV, nil
+ }
+ if serverVersions.Has(preferredGV.String()) {
+ return preferredGV, nil
+ }
+ // If we are using an explicit config version the server does not support, fail.
+ if (c.GroupVersion != nil) && (*preferredGV == *c.GroupVersion) {
+ return nil, fmt.Errorf("server does not support API version %q", preferredGV)
+ }
+ }
+
+ for _, clientGV := range clientRegisteredGVs {
+ if serverVersions.Has(clientGV.String()) {
+ // Version was not explicitly requested in command config (--api-version).
+ // Ok to fall back to a supported version with a warning.
+ // TODO: caesarxuchao: enable the warning message when we have
+ // proper fix. Please refer to issue #14895.
+ // if len(version) != 0 {
+ // glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion)
+ // }
+ t := clientGV
+ return &t, nil
+ }
+ }
+ return nil, fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v",
+ serverVersions, clientVersions)
+}
+
+// NewOrDie creates a Kubernetes client and panics if the client cannot be created
+// (for example, because the provided API version is not recognized).
+func NewOrDie(c *restclient.Config) *Client {
+ client, err := New(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// NewInCluster is a shortcut for calling InClusterConfig() and then New().
+func NewInCluster() (*Client, error) {
+ cc, err := restclient.InClusterConfig()
+ if err != nil {
+ return nil, err
+ }
+ return New(cc)
+}
+
+// SetKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+// TODO: this method needs to be split into one that sets defaults per group, expected to be fixed in PR "Refactoring clientcache.go and helper.go #14592"
+func SetKubernetesDefaults(config *restclient.Config) error {
+ if config.APIPath == "" {
+ config.APIPath = legacyAPIPath
+ }
+ g, err := registered.Group(api.GroupName)
+ if err != nil {
+ return err
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ if config.NegotiatedSerializer == nil {
+ config.NegotiatedSerializer = api.Codecs
+ }
+
+ return restclient.SetKubernetesDefaults(config)
+}
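
Usage sketch (illustrative, not part of the vendored file): wiring New and NegotiateVersion together against an assumed apiserver address, preferring the legacy "v1" group/version.

package main

import (
	"fmt"
	"log"

	uapi "k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
	// Hypothetical apiserver address; inside a pod, NewInCluster() would be
	// the usual entry point instead.
	cfg := &restclient.Config{Host: "http://127.0.0.1:8080"}

	c, err := client.New(cfg)
	if err != nil {
		log.Fatalf("failed to build client: %v", err)
	}

	// Ask the server for a group/version both sides understand.
	gv, err := client.NegotiateVersion(c, cfg, nil, []uapi.GroupVersion{{Version: "v1"}})
	if err != nil {
		log.Fatalf("version negotiation failed: %v", err)
	}
	fmt.Printf("negotiated API version %s\n", gv.String())
}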
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go
new file mode 100644
index 0000000..76c6a9c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// HorizontalPodAutoscalersNamespacer has methods to work with HorizontalPodAutoscaler resources in a namespace
+type HorizontalPodAutoscalersNamespacer interface {
+ HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface
+}
+
+// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
+type HorizontalPodAutoscalerInterface interface {
+ List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error)
+ Get(name string) (*autoscaling.HorizontalPodAutoscaler, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error)
+ Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error)
+ UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// horizontalPodAutoscalers implements the HorizontalPodAutoscalerInterface using an AutoscalingClient internally
+type horizontalPodAutoscalers struct {
+ client *AutoscalingClient
+ ns string
+}
+
+// newHorizontalPodAutoscalers returns a horizontalPodAutoscalers
+func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers {
+ return &horizontalPodAutoscalers{
+ client: c,
+ ns: namespace,
+ }
+}
+
+// List takes label and field selectors, and returns the list of horizontalPodAutoscalers that match those selectors.
+func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) {
+ result = &autoscaling.HorizontalPodAutoscalerList{}
+ err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding HorizontalPodAutoscaler object, and an error if it occurs
+func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) {
+ result = &autoscaling.HorizontalPodAutoscaler{}
+ err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs.
+func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) {
+ result = &autoscaling.HorizontalPodAutoscaler{}
+ err = c.client.Post().Namespace(c.ns).Resource("horizontalPodAutoscalers").Body(horizontalPodAutoscaler).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs.
+func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) {
+ result = &autoscaling.HorizontalPodAutoscaler{}
+ err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).Body(horizontalPodAutoscaler).Do().Into(result)
+ return
+}
+
+// UpdateStatus takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs.
+func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) {
+ result = &autoscaling.HorizontalPodAutoscaler{}
+ err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).SubResource("status").Body(horizontalPodAutoscaler).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("horizontalPodAutoscalers").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
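
Usage sketch (illustrative): listing autoscalers through the interface above; the CurrentReplicas/DesiredReplicas status fields are assumed from the internal autoscaling types of the same release.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// printAutoscalers lists every HorizontalPodAutoscaler in the namespace and
// prints current versus desired replica counts.
func printAutoscalers(n client.HorizontalPodAutoscalersNamespacer, namespace string) error {
	list, err := n.HorizontalPodAutoscalers(namespace).List(api.ListOptions{})
	if err != nil {
		return err
	}
	for _, hpa := range list.Items {
		fmt.Printf("%s/%s: %d -> %d replicas\n", namespace, hpa.Name, hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
	}
	return nil
}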
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go
new file mode 100644
index 0000000..8508573
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+// These imports are the API groups the client will support.
+import (
+ "fmt"
+
+ _ "k8s.io/kubernetes/pkg/api/install"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ _ "k8s.io/kubernetes/pkg/apis/apps/install"
+ _ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install"
+ _ "k8s.io/kubernetes/pkg/apis/authorization/install"
+ _ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
+ _ "k8s.io/kubernetes/pkg/apis/batch/install"
+ _ "k8s.io/kubernetes/pkg/apis/certificates/install"
+ _ "k8s.io/kubernetes/pkg/apis/componentconfig/install"
+ _ "k8s.io/kubernetes/pkg/apis/extensions/install"
+ _ "k8s.io/kubernetes/pkg/apis/policy/install"
+ _ "k8s.io/kubernetes/pkg/apis/rbac/install"
+)
+
+func init() {
+ if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 {
+ panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions))
+ }
+}
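
For context, a minimal sketch of what this file does for a consumer: blank-importing the client package registers every group listed above, and the init check ties that registration to the KUBE_API_VERSIONS environment variable.

package main

import (
	// Blank-importing the unversioned client pulls in the install packages
	// above, so all supported API groups are registered before main runs.
	_ "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
	// If KUBE_API_VERSIONS names a version whose install package is not
	// linked in, the init() above panics at startup, surfacing the
	// misconfiguration immediately.
}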
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go
new file mode 100644
index 0000000..59c6a6d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// IngressNamespacer has methods to work with Ingress resources in a namespace
+type IngressNamespacer interface {
+ Ingress(namespace string) IngressInterface
+}
+
+// IngressInterface exposes methods to work on Ingress resources.
+type IngressInterface interface {
+ List(opts api.ListOptions) (*extensions.IngressList, error)
+ Get(name string) (*extensions.Ingress, error)
+ Create(ingress *extensions.Ingress) (*extensions.Ingress, error)
+ Update(ingress *extensions.Ingress) (*extensions.Ingress, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ UpdateStatus(ingress *extensions.Ingress) (*extensions.Ingress, error)
+}
+
+// ingress implements the IngressInterface
+type ingress struct {
+ r *ExtensionsClient
+ ns string
+}
+
+// newIngress returns an ingress
+func newIngress(c *ExtensionsClient, namespace string) *ingress {
+ return &ingress{c, namespace}
+}
+
+// List returns the list of ingresses that match the label and field selectors.
+func (c *ingress) List(opts api.ListOptions) (result *extensions.IngressList, err error) {
+ result = &extensions.IngressList{}
+ err = c.r.Get().Namespace(c.ns).Resource("ingresses").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular ingress.
+func (c *ingress) Get(name string) (result *extensions.Ingress, err error) {
+ result = &extensions.Ingress{}
+ err = c.r.Get().Namespace(c.ns).Resource("ingresses").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new ingress.
+func (c *ingress) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) {
+ result = &extensions.Ingress{}
+ err = c.r.Post().Namespace(c.ns).Resource("ingresses").Body(ingress).Do().Into(result)
+ return
+}
+
+// Update updates an existing ingress.
+func (c *ingress) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) {
+ result = &extensions.Ingress{}
+ err = c.r.Put().Namespace(c.ns).Resource("ingresses").Name(ingress.Name).Body(ingress).Do().Into(result)
+ return
+}
+
+// Delete deletes an ingress and returns an error if one occurs.
+func (c *ingress) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("ingresses").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested ingress.
+func (c *ingress) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("ingresses").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// UpdateStatus takes the name of the ingress and the new status. Returns the server's representation of the ingress, and an error, if it occurs.
+func (c *ingress) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) {
+ result = &extensions.Ingress{}
+ err = c.r.Put().Namespace(c.ns).Resource("ingresses").Name(ingress.Name).SubResource("status").Body(ingress).Do().Into(result)
+ return
+}
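
Usage sketch (illustrative; the helper name and stop-channel convention are assumptions): the watch pattern exposed above, draining the result channel until the server closes the watch or the caller signals stop.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// logIngressEvents watches ingresses in one namespace and logs event types.
func logIngressEvents(ing client.IngressInterface, stop <-chan struct{}) error {
	w, err := ing.Watch(api.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for {
		select {
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil // server closed the watch
			}
			fmt.Printf("ingress event: %s\n", ev.Type)
		case <-stop:
			return nil
		}
	}
}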
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go
new file mode 100644
index 0000000..14cfa3a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// JobsNamespacer has methods to work with Job resources in a namespace
+type JobsNamespacer interface {
+ Jobs(namespace string) JobInterface
+}
+
+// JobInterface exposes methods to work on Job resources.
+type JobInterface interface {
+ List(opts api.ListOptions) (*batch.JobList, error)
+ Get(name string) (*batch.Job, error)
+ Create(job *batch.Job) (*batch.Job, error)
+ Update(job *batch.Job) (*batch.Job, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ UpdateStatus(job *batch.Job) (*batch.Job, error)
+}
+
+// jobs implements the JobInterface using an ExtensionsClient internally
+type jobs struct {
+ r *ExtensionsClient
+ ns string
+}
+
+// newJobs returns a jobs
+func newJobs(c *ExtensionsClient, namespace string) *jobs {
+ return &jobs{c, namespace}
+}
+
+// Ensure statically that jobs implements JobInterface.
+var _ JobInterface = &jobs{}
+
+// List returns a list of jobs that match the label and field selectors.
+func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) {
+ result = &batch.JobList{}
+ err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular job.
+func (c *jobs) Get(name string) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new job.
+func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result)
+ return
+}
+
+// Update updates an existing job.
+func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result)
+ return
+}
+
+// Delete deletes a job, returns error if one occurs.
+func (c *jobs) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("jobs").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested jobs.
+func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("jobs").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs.
+func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result)
+ return
+}
+
+// jobsV1 implements the JobInterface using a BatchClient internally
+type jobsV1 struct {
+ r *BatchClient
+ ns string
+}
+
+// newJobsV1 returns a jobsV1
+func newJobsV1(c *BatchClient, namespace string) *jobsV1 {
+ return &jobsV1{c, namespace}
+}
+
+// Ensure statically that jobsV1 implements JobInterface.
+var _ JobInterface = &jobsV1{}
+
+// List returns a list of jobs that match the label and field selectors.
+func (c *jobsV1) List(opts api.ListOptions) (result *batch.JobList, err error) {
+ result = &batch.JobList{}
+ err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular job.
+func (c *jobsV1) Get(name string) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new job.
+func (c *jobsV1) Create(job *batch.Job) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result)
+ return
+}
+
+// Update updates an existing job.
+func (c *jobsV1) Update(job *batch.Job) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result)
+ return
+}
+
+// Delete deletes a job, returns error if one occurs.
+func (c *jobsV1) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("jobs").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested jobs.
+func (c *jobsV1) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("jobs").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs.
+func (c *jobsV1) UpdateStatus(job *batch.Job) (result *batch.Job, err error) {
+ result = &batch.Job{}
+ err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result)
+ return
+}
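
Usage sketch (illustrative): because both implementations above satisfy the same JobInterface, a helper can be written once and backed by either the extensions or the batch group client.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// activePods sums Status.Active over every job in the namespace.
func activePods(jobs client.JobInterface) (int32, error) {
	list, err := jobs.List(api.ListOptions{})
	if err != nil {
		return 0, err
	}
	var active int32
	for _, job := range list.Items {
		active += job.Status.Active
	}
	return active, nil
}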
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go
new file mode 100644
index 0000000..914a049
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// LimitRangesNamespacer has methods to work with LimitRange resources in a namespace
+type LimitRangesNamespacer interface {
+ LimitRanges(namespace string) LimitRangeInterface
+}
+
+// LimitRangeInterface has methods to work with LimitRange resources.
+type LimitRangeInterface interface {
+ List(opts api.ListOptions) (*api.LimitRangeList, error)
+ Get(name string) (*api.LimitRange, error)
+ Delete(name string) error
+ Create(limitRange *api.LimitRange) (*api.LimitRange, error)
+ Update(limitRange *api.LimitRange) (*api.LimitRange, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// limitRanges implements the LimitRangeInterface
+type limitRanges struct {
+ r *Client
+ ns string
+}
+
+// newLimitRanges returns a limitRanges
+func newLimitRanges(c *Client, namespace string) *limitRanges {
+ return &limitRanges{
+ r: c,
+ ns: namespace,
+ }
+}
+
+// List takes a selector, and returns the list of limitRanges that match that selector.
+func (c *limitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) {
+ result = &api.LimitRangeList{}
+ err = c.r.Get().Namespace(c.ns).Resource("limitRanges").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the limitRange, and returns the corresponding LimitRange object, and an error if it occurs
+func (c *limitRanges) Get(name string) (result *api.LimitRange, err error) {
+ result = &api.LimitRange{}
+ err = c.r.Get().Namespace(c.ns).Resource("limitRanges").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the limitRange, and returns an error if one occurs
+func (c *limitRanges) Delete(name string) error {
+ return c.r.Delete().Namespace(c.ns).Resource("limitRanges").Name(name).Do().Error()
+}
+
+// Create takes the representation of a limitRange. Returns the server's representation of the limitRange, and an error, if it occurs.
+func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) {
+ result = &api.LimitRange{}
+ err = c.r.Post().Namespace(c.ns).Resource("limitRanges").Body(limitRange).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a limitRange to update. Returns the server's representation of the limitRange, and an error, if it occurs.
+func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) {
+ result = &api.LimitRange{}
+ err = c.r.Put().Namespace(c.ns).Resource("limitRanges").Name(limitRange.Name).Body(limitRange).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested resource
+func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("limitRanges").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
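
Usage sketch (illustrative; the object name and quantity are arbitrary): creating a LimitRange that applies a default container CPU limit through the interface above.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// createDefaultCPULimit creates a LimitRange that gives every container a
// default CPU limit of 500m unless the pod spec overrides it.
func createDefaultCPULimit(lr client.LimitRangeInterface) (*api.LimitRange, error) {
	return lr.Create(&api.LimitRange{
		ObjectMeta: api.ObjectMeta{Name: "default-cpu"},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{{
				Type:    api.LimitTypeContainer,
				Default: api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
			}},
		},
	})
}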
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go
new file mode 100644
index 0000000..b4a3836
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+type NamespacesInterface interface {
+ Namespaces() NamespaceInterface
+}
+
+type NamespaceInterface interface {
+ Create(item *api.Namespace) (*api.Namespace, error)
+ Get(name string) (result *api.Namespace, err error)
+ List(opts api.ListOptions) (*api.NamespaceList, error)
+ Delete(name string) error
+ Update(item *api.Namespace) (*api.Namespace, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ Finalize(item *api.Namespace) (*api.Namespace, error)
+ Status(item *api.Namespace) (*api.Namespace, error)
+}
+
+// namespaces implements the NamespaceInterface
+type namespaces struct {
+ r *Client
+}
+
+// newNamespaces returns a namespaces object.
+func newNamespaces(c *Client) *namespaces {
+ return &namespaces{r: c}
+}
+
+// Create creates a new namespace.
+func (c *namespaces) Create(namespace *api.Namespace) (*api.Namespace, error) {
+ result := &api.Namespace{}
+ err := c.r.Post().Resource("namespaces").Body(namespace).Do().Into(result)
+ return result, err
+}
+
+// List lists all the namespaces in the cluster.
+func (c *namespaces) List(opts api.ListOptions) (*api.NamespaceList, error) {
+ result := &api.NamespaceList{}
+ err := c.r.Get().
+ Resource("namespaces").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().Into(result)
+ return result, err
+}
+
+// Update takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs.
+func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) {
+ result = &api.Namespace{}
+ err = c.r.Put().Resource("namespaces").Name(namespace.Name).Body(namespace).Do().Into(result)
+ return
+}
+
+// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs.
+func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) {
+ result = &api.Namespace{}
+ if len(namespace.ResourceVersion) == 0 {
+ err = fmt.Errorf("invalid update object, missing resource version: %v", namespace)
+ return
+ }
+ err = c.r.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result)
+ return
+}
+
+// Status takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs.
+func (c *namespaces) Status(namespace *api.Namespace) (result *api.Namespace, err error) {
+ result = &api.Namespace{}
+ if len(namespace.ResourceVersion) == 0 {
+ err = fmt.Errorf("invalid update object, missing resource version: %v", namespace)
+ return
+ }
+ err = c.r.Put().Resource("namespaces").Name(namespace.Name).SubResource("status").Body(namespace).Do().Into(result)
+ return
+}
+
+// Get gets an existing namespace
+func (c *namespaces) Get(name string) (*api.Namespace, error) {
+ result := &api.Namespace{}
+ err := c.r.Get().Resource("namespaces").Name(name).Do().Into(result)
+ return result, err
+}
+
+// Delete deletes an existing namespace.
+func (c *namespaces) Delete(name string) error {
+ return c.r.Delete().Resource("namespaces").Name(name).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested namespaces.
+func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Resource("namespaces").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
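
Usage sketch (illustrative): because Finalize rejects objects without a ResourceVersion, the usual flow is to fetch the live namespace first, clear its finalizers, and write it back through the finalize subresource.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// clearFinalizers fetches the live namespace so ResourceVersion is set,
// empties the finalizer list, and writes it back via the finalize subresource.
func clearFinalizers(nsClient client.NamespaceInterface, name string) (*api.Namespace, error) {
	ns, err := nsClient.Get(name)
	if err != nil {
		return nil, err
	}
	ns.Spec.Finalizers = nil
	return nsClient.Finalize(ns)
}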
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go
new file mode 100644
index 0000000..3e3f610
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// NetworkPolicyNamespacer has methods to work with NetworkPolicy resources in a namespace
+type NetworkPolicyNamespacer interface {
+ NetworkPolicies(namespace string) NetworkPolicyInterface
+}
+
+// NetworkPolicyInterface exposes methods to work on NetworkPolicy resources.
+type NetworkPolicyInterface interface {
+ List(opts api.ListOptions) (*extensions.NetworkPolicyList, error)
+ Get(name string) (*extensions.NetworkPolicy, error)
+ Create(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error)
+ Update(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// NetworkPolicies implements the NetworkPolicyInterface
+type NetworkPolicies struct {
+ r *ExtensionsClient
+ ns string
+}
+
+// newNetworkPolicies returns a NetworkPolicies
+func newNetworkPolicies(c *ExtensionsClient, namespace string) *NetworkPolicies {
+ return &NetworkPolicies{c, namespace}
+}
+
+// List returns the list of networkPolicies that match the label and field selectors.
+func (c *NetworkPolicies) List(opts api.ListOptions) (result *extensions.NetworkPolicyList, err error) {
+ result = &extensions.NetworkPolicyList{}
+ err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular networkPolicy.
+func (c *NetworkPolicies) Get(name string) (result *extensions.NetworkPolicy, err error) {
+ result = &extensions.NetworkPolicy{}
+ err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new networkPolicy.
+func (c *NetworkPolicies) Create(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) {
+ result = &extensions.NetworkPolicy{}
+ err = c.r.Post().Namespace(c.ns).Resource("networkpolicies").Body(networkPolicy).Do().Into(result)
+ return
+}
+
+// Update updates an existing networkPolicy.
+func (c *NetworkPolicies) Update(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) {
+ result = &extensions.NetworkPolicy{}
+ err = c.r.Put().Namespace(c.ns).Resource("networkpolicies").Name(networkPolicy.Name).Body(networkPolicy).Do().Into(result)
+ return
+}
+
+// Delete deletes a networkPolicy, returns error if one occurs.
+func (c *NetworkPolicies) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("networkpolicies").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested networkPolicy.
+func (c *NetworkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go
new file mode 100644
index 0000000..15a7db2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+type NodesInterface interface {
+ Nodes() NodeInterface
+}
+
+type NodeInterface interface {
+ Get(name string) (result *api.Node, err error)
+ Create(node *api.Node) (*api.Node, error)
+ List(opts api.ListOptions) (*api.NodeList, error)
+ Delete(name string) error
+ DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
+ Update(*api.Node) (*api.Node, error)
+ UpdateStatus(*api.Node) (*api.Node, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// nodes implements the NodeInterface
+type nodes struct {
+ r *Client
+}
+
+// newNodes returns a nodes object.
+func newNodes(c *Client) *nodes {
+ return &nodes{c}
+}
+
+// resourceName returns node's URL resource name.
+func (c *nodes) resourceName() string {
+ return "nodes"
+}
+
+// Create creates a new node.
+func (c *nodes) Create(node *api.Node) (*api.Node, error) {
+ result := &api.Node{}
+ err := c.r.Post().Resource(c.resourceName()).Body(node).Do().Into(result)
+ return result, err
+}
+
+// List takes a selector, and returns the list of nodes that match that selector in the cluster.
+func (c *nodes) List(opts api.ListOptions) (*api.NodeList, error) {
+ result := &api.NodeList{}
+ err := c.r.Get().Resource(c.resourceName()).VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return result, err
+}
+
+// Get gets an existing node.
+func (c *nodes) Get(name string) (*api.Node, error) {
+ result := &api.Node{}
+ err := c.r.Get().Resource(c.resourceName()).Name(name).Do().Into(result)
+ return result, err
+}
+
+// Delete deletes an existing node.
+func (c *nodes) Delete(name string) error {
+ return c.r.Delete().Resource(c.resourceName()).Name(name).Do().Error()
+}
+
+// DeleteCollection deletes a collection of nodes.
+func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+ return c.r.Delete().
+ Resource(c.resourceName()).
+ VersionedParams(&listOptions, api.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Update updates an existing node.
+func (c *nodes) Update(node *api.Node) (*api.Node, error) {
+ result := &api.Node{}
+ err := c.r.Put().Resource(c.resourceName()).Name(node.Name).Body(node).Do().Into(result)
+ return result, err
+}
+
+func (c *nodes) UpdateStatus(node *api.Node) (*api.Node, error) {
+ result := &api.Node{}
+ err := c.r.Put().Resource(c.resourceName()).Name(node.Name).SubResource("status").Body(node).Do().Into(result)
+ return result, err
+}
+
+// Watch returns a watch.Interface that watches the requested nodes.
+func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(api.NamespaceAll).
+ Resource(c.resourceName()).
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
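
Usage sketch (illustrative; the condition constants are assumed from the internal API of the same release): filtering the node list on the Ready condition.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// readyNodeNames returns the names of all nodes whose Ready condition is True.
func readyNodeNames(nodes client.NodeInterface) ([]string, error) {
	list, err := nodes.List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	var ready []string
	for _, node := range list.Items {
		for _, cond := range node.Status.Conditions {
			if cond.Type == api.NodeReady && cond.Status == api.ConditionTrue {
				ready = append(ready, node.Name)
				break
			}
		}
	}
	return ready, nil
}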
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go
new file mode 100644
index 0000000..4ea3a95
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// PersistentVolumeClaimsNamespacer has methods to work with PersistentVolumeClaim resources in a namespace
+type PersistentVolumeClaimsNamespacer interface {
+ PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface
+}
+
+// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources.
+type PersistentVolumeClaimInterface interface {
+ List(opts api.ListOptions) (*api.PersistentVolumeClaimList, error)
+ Get(name string) (*api.PersistentVolumeClaim, error)
+ Create(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
+ Update(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
+ UpdateStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
+ Delete(name string) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// persistentVolumeClaims implements the PersistentVolumeClaimInterface
+type persistentVolumeClaims struct {
+ client *Client
+ namespace string
+}
+
+// newPersistentVolumeClaims returns a persistentVolumeClaims
+func newPersistentVolumeClaims(c *Client, namespace string) *persistentVolumeClaims {
+ return &persistentVolumeClaims{c, namespace}
+}
+
+func (c *persistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) {
+ result = &api.PersistentVolumeClaimList{}
+
+ err = c.client.Get().
+ Namespace(c.namespace).
+ Resource("persistentVolumeClaims").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *persistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) {
+ result = &api.PersistentVolumeClaim{}
+ err = c.client.Get().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(name).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumeClaims) Create(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) {
+ result = &api.PersistentVolumeClaim{}
+ err = c.client.Post().Namespace(c.namespace).Resource("persistentVolumeClaims").Body(claim).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumeClaims) Update(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) {
+ result = &api.PersistentVolumeClaim{}
+ err = c.client.Put().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(claim.Name).Body(claim).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumeClaims) UpdateStatus(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) {
+ result = &api.PersistentVolumeClaim{}
+ err = c.client.Put().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(claim.Name).SubResource("status").Body(claim).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumeClaims) Delete(name string) error {
+ return c.client.Delete().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(name).Do().Error()
+}
+
+func (c *persistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.namespace).
+ Resource("persistentVolumeClaims").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
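
Usage sketch (illustrative; it assumes the wait utility from the same Kubernetes tree and an arbitrary poll interval): blocking until a claim reaches the Bound phase.

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForClaimBound polls the claim until its phase reaches Bound or the
// timeout expires.
func waitForClaimBound(pvcs client.PersistentVolumeClaimInterface, name string, timeout time.Duration) error {
	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
		claim, err := pvcs.Get(name)
		if err != nil {
			return false, err
		}
		return claim.Status.Phase == api.ClaimBound, nil
	})
}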
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go
new file mode 100644
index 0000000..5fce1f0
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+type PersistentVolumesInterface interface {
+ PersistentVolumes() PersistentVolumeInterface
+}
+
+// PersistentVolumeInterface has methods to work with PersistentVolume resources.
+type PersistentVolumeInterface interface {
+ List(opts api.ListOptions) (*api.PersistentVolumeList, error)
+ Get(name string) (*api.PersistentVolume, error)
+ Create(volume *api.PersistentVolume) (*api.PersistentVolume, error)
+ Update(volume *api.PersistentVolume) (*api.PersistentVolume, error)
+ UpdateStatus(persistentVolume *api.PersistentVolume) (*api.PersistentVolume, error)
+ Delete(name string) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// persistentVolumes implements the PersistentVolumeInterface
+type persistentVolumes struct {
+ client *Client
+}
+
+func newPersistentVolumes(c *Client) *persistentVolumes {
+ return &persistentVolumes{c}
+}
+
+func (c *persistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) {
+ result = &api.PersistentVolumeList{}
+ err = c.client.Get().
+ Resource("persistentVolumes").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+func (c *persistentVolumes) Get(name string) (result *api.PersistentVolume, err error) {
+ result = &api.PersistentVolume{}
+ err = c.client.Get().Resource("persistentVolumes").Name(name).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumes) Create(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) {
+ result = &api.PersistentVolume{}
+ err = c.client.Post().Resource("persistentVolumes").Body(volume).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumes) Update(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) {
+ result = &api.PersistentVolume{}
+ err = c.client.Put().Resource("persistentVolumes").Name(volume.Name).Body(volume).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumes) UpdateStatus(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) {
+ result = &api.PersistentVolume{}
+ err = c.client.Put().Resource("persistentVolumes").Name(volume.Name).SubResource("status").Body(volume).Do().Into(result)
+ return
+}
+
+func (c *persistentVolumes) Delete(name string) error {
+ return c.client.Delete().Resource("persistentVolumes").Name(name).Do().Error()
+}
+
+func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Resource("persistentVolumes").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go
new file mode 100644
index 0000000..954efcd
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/apps"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// PetSetNamespacer has methods to work with PetSet resources in a namespace
+type PetSetNamespacer interface {
+ PetSets(namespace string) PetSetInterface
+}
+
+// PetSetInterface exposes methods to work on PetSet resources.
+type PetSetInterface interface {
+ List(opts api.ListOptions) (*apps.PetSetList, error)
+ Get(name string) (*apps.PetSet, error)
+ Create(petSet *apps.PetSet) (*apps.PetSet, error)
+ Update(petSet *apps.PetSet) (*apps.PetSet, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ UpdateStatus(petSet *apps.PetSet) (*apps.PetSet, error)
+}
+
+// petSet implements the PetSetInterface
+type petSet struct {
+ r *AppsClient
+ ns string
+}
+
+// newPetSet returns a petSet
+func newPetSet(c *AppsClient, namespace string) *petSet {
+ return &petSet{c, namespace}
+}
+
+// List returns the list of petSets that match the label and field selectors.
+func (c *petSet) List(opts api.ListOptions) (result *apps.PetSetList, err error) {
+ result = &apps.PetSetList{}
+ err = c.r.Get().Namespace(c.ns).Resource("petsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular petSet.
+func (c *petSet) Get(name string) (result *apps.PetSet, err error) {
+ result = &apps.PetSet{}
+ err = c.r.Get().Namespace(c.ns).Resource("petsets").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new petSet.
+func (c *petSet) Create(petSet *apps.PetSet) (result *apps.PetSet, err error) {
+ result = &apps.PetSet{}
+ err = c.r.Post().Namespace(c.ns).Resource("petsets").Body(petSet).Do().Into(result)
+ return
+}
+
+// Update updates an existing petSet.
+func (c *petSet) Update(petSet *apps.PetSet) (result *apps.PetSet, err error) {
+ result = &apps.PetSet{}
+ err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).Body(petSet).Do().Into(result)
+ return
+}
+
+// Delete deletes a petSet, returns error if one occurs.
+func (c *petSet) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("petsets").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested petSet.
+func (c *petSet) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("petsets").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// UpdateStatus takes the name of the petSet and the new status. Returns the server's representation of the petSet, and an error, if it occurs.
+func (c *petSet) UpdateStatus(petSet *apps.PetSet) (result *apps.PetSet, err error) {
+ result = &apps.PetSet{}
+ err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).SubResource("status").Body(petSet).Do().Into(result)
+ return
+}
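
Usage sketch (illustrative): a read-modify-write helper over the interface above; conflict retries are left to the caller.

package example

import (
	"k8s.io/kubernetes/pkg/apis/apps"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// updatePetSet applies mutate to the live object and writes it back. A
// conflict error from Update means the object changed underneath us and the
// read-modify-write should simply be retried.
func updatePetSet(ps client.PetSetInterface, name string, mutate func(*apps.PetSet)) (*apps.PetSet, error) {
	cur, err := ps.Get(name)
	if err != nil {
		return nil, err
	}
	mutate(cur)
	return ps.Update(cur)
}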
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go
new file mode 100644
index 0000000..0239623
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/policy"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// PodDisruptionBudgetNamespacer has methods to work with PodDisruptionBudget resources in a namespace
+type PodDisruptionBudgetNamespacer interface {
+ PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface
+}
+
+// PodDisruptionBudgetInterface exposes methods to work on PodDisruptionBudget resources.
+type PodDisruptionBudgetInterface interface {
+ List(opts api.ListOptions) (*policy.PodDisruptionBudgetList, error)
+ Get(name string) (*policy.PodDisruptionBudget, error)
+ Create(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
+ Update(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
+}
+
+// podDisruptionBudget implements the PodDisruptionBudgetInterface
+type podDisruptionBudget struct {
+ r *PolicyClient
+ ns string
+}
+
+// newPodDisruptionBudget returns a podDisruptionBudget
+func newPodDisruptionBudget(c *PolicyClient, namespace string) *podDisruptionBudget {
+ return &podDisruptionBudget{c, namespace}
+}
+
+// List returns the list of podDisruptionBudgets that match the label and field selectors.
+func (c *podDisruptionBudget) List(opts api.ListOptions) (result *policy.PodDisruptionBudgetList, err error) {
+ result = &policy.PodDisruptionBudgetList{}
+ err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular podDisruptionBudget.
+func (c *podDisruptionBudget) Get(name string) (result *policy.PodDisruptionBudget, err error) {
+ result = &policy.PodDisruptionBudget{}
+ err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new podDisruptionBudget.
+func (c *podDisruptionBudget) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) {
+ result = &policy.PodDisruptionBudget{}
+ err = c.r.Post().Namespace(c.ns).Resource("poddisruptionbudgets").Body(podDisruptionBudget).Do().Into(result)
+ return
+}
+
+// Update updates an existing podDisruptionBudget.
+func (c *podDisruptionBudget) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) {
+ result = &policy.PodDisruptionBudget{}
+ err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).Body(podDisruptionBudget).Do().Into(result)
+ return
+}
+
+// Delete deletes a podDisruptionBudget, returns error if one occurs.
+func (c *podDisruptionBudget) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested podDisruptionBudget.
+func (c *podDisruptionBudget) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("poddisruptionbudgets").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// UpdateStatus takes the name of the podDisruptionBudget and the new status. Returns the server's representation of the podDisruptionBudget, and an error, if it occurs.
+func (c *podDisruptionBudget) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) {
+ result = &policy.PodDisruptionBudget{}
+ err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).SubResource("status").Body(podDisruptionBudget).Do().Into(result)
+ return
+}
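
Usage sketch (illustrative): deleting a budget with a zero grace period, exercising the DeleteOptions body that the Delete method above sends.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// deleteBudgetNow deletes a PodDisruptionBudget with a zero grace period.
func deleteBudgetNow(pdbs client.PodDisruptionBudgetInterface, name string) error {
	grace := int64(0)
	return pdbs.Delete(name, &api.DeleteOptions{GracePeriodSeconds: &grace})
}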
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go
new file mode 100644
index 0000000..7627d73
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// PodTemplatesNamespacer has methods to work with PodTemplate resources in a namespace
+type PodTemplatesNamespacer interface {
+ PodTemplates(namespace string) PodTemplateInterface
+}
+
+// PodTemplateInterface has methods to work with PodTemplate resources.
+type PodTemplateInterface interface {
+ List(opts api.ListOptions) (*api.PodTemplateList, error)
+ Get(name string) (*api.PodTemplate, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(podTemplate *api.PodTemplate) (*api.PodTemplate, error)
+ Update(podTemplate *api.PodTemplate) (*api.PodTemplate, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// podTemplates implements PodTemplatesNamespacer interface
+type podTemplates struct {
+ r *Client
+ ns string
+}
+
+// newPodTemplates returns a podTemplates
+func newPodTemplates(c *Client, namespace string) *podTemplates {
+ return &podTemplates{
+ r: c,
+ ns: namespace,
+ }
+}
+
+// List takes label and field selectors, and returns the list of podTemplates that match those selectors.
+func (c *podTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) {
+ result = &api.PodTemplateList{}
+ err = c.r.Get().Namespace(c.ns).Resource("podTemplates").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the podTemplate, and returns the corresponding PodTemplate object, and an error if it occurs
+func (c *podTemplates) Get(name string) (result *api.PodTemplate, err error) {
+ result = &api.PodTemplate{}
+ err = c.r.Get().Namespace(c.ns).Resource("podTemplates").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the podTemplate, and returns an error if one occurs
+func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error {
+ return c.r.Delete().Namespace(c.ns).Resource("podTemplates").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a podTemplate. Returns the server's representation of the podTemplate, and an error, if it occurs.
+func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) {
+ result = &api.PodTemplate{}
+ err = c.r.Post().Namespace(c.ns).Resource("podTemplates").Body(podTemplate).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a podTemplate to update. Returns the server's representation of the podTemplate, and an error, if it occurs.
+func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) {
+ result = &api.PodTemplate{}
+ err = c.r.Put().Namespace(c.ns).Resource("podTemplates").Name(podTemplate.Name).Body(podTemplate).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested podTemplates.
+func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("podTemplates").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go
new file mode 100644
index 0000000..ea16fb8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// PodsNamespacer has methods to work with Pod resources in a namespace
+type PodsNamespacer interface {
+ Pods(namespace string) PodInterface
+}
+
+// PodInterface has methods to work with Pod resources.
+type PodInterface interface {
+ List(opts api.ListOptions) (*api.PodList, error)
+ Get(name string) (*api.Pod, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(pod *api.Pod) (*api.Pod, error)
+ Update(pod *api.Pod) (*api.Pod, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ Bind(binding *api.Binding) error
+ UpdateStatus(pod *api.Pod) (*api.Pod, error)
+ GetLogs(name string, opts *api.PodLogOptions) *restclient.Request
+}
+
+// pods implements PodsNamespacer interface
+type pods struct {
+ r *Client
+ ns string
+}
+
+// newPods returns a pods
+func newPods(c *Client, namespace string) *pods {
+ return &pods{
+ r: c,
+ ns: namespace,
+ }
+}
+
+// List takes label and field selectors, and returns the list of pods that match those selectors.
+func (c *pods) List(opts api.ListOptions) (result *api.PodList, err error) {
+ result = &api.PodList{}
+ err = c.r.Get().Namespace(c.ns).Resource("pods").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the pod, and returns the corresponding Pod object, and an error if it occurs
+func (c *pods) Get(name string) (result *api.Pod, err error) {
+ result = &api.Pod{}
+ err = c.r.Get().Namespace(c.ns).Resource("pods").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the pod, and returns an error if one occurs
+func (c *pods) Delete(name string, options *api.DeleteOptions) error {
+ return c.r.Delete().Namespace(c.ns).Resource("pods").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a pod. Returns the server's representation of the pod, and an error, if it occurs.
+func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) {
+ result = &api.Pod{}
+ err = c.r.Post().Namespace(c.ns).Resource("pods").Body(pod).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a pod to update. Returns the server's representation of the pod, and an error, if it occurs.
+func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) {
+ result = &api.Pod{}
+ err = c.r.Put().Namespace(c.ns).Resource("pods").Name(pod.Name).Body(pod).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested pods.
+func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("pods").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored).
+func (c *pods) Bind(binding *api.Binding) error {
+ return c.r.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error()
+}
+
+// UpdateStatus takes the name of the pod and the new status. Returns the server's representation of the pod, and an error, if it occurs.
+func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) {
+ result = &api.Pod{}
+ err = c.r.Put().Namespace(c.ns).Resource("pods").Name(pod.Name).SubResource("status").Body(pod).Do().Into(result)
+ return
+}
+
+// GetLogs constructs a request for getting the logs for a pod
+func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request {
+ return c.r.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec)
+}
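
Usage sketch for the two pod-specific calls above that differ from plain CRUD: GetLogs, which returns a restclient.Request to be streamed, and Bind, which posts a Binding to the pod's binding subresource. An existing unversioned.PodInterface is assumed; pod and node names are placeholders.

package main

import (
	"io"
	"os"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// dumpLogs streams the log subresource of the named pod to stdout.
func dumpLogs(pods unversioned.PodInterface, name string) error {
	rc, err := pods.GetLogs(name, &api.PodLogOptions{}).Stream() // opens the log endpoint
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}

// bindToNode assigns the named pod to a node via the binding subresource, the
// same call a scheduler would make. Names are illustrative.
func bindToNode(pods unversioned.PodInterface, podName, nodeName string) error {
	return pods.Bind(&api.Binding{
		ObjectMeta: api.ObjectMeta{Name: podName},
		Target:     api.ObjectReference{Kind: "Node", Name: nodeName},
	})
}

func main() {}
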
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go
new file mode 100644
index 0000000..f03e643
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+type PodSecurityPoliciesInterface interface {
+ PodSecurityPolicies() PodSecurityPolicyInterface
+}
+
+type PodSecurityPolicyInterface interface {
+ Get(name string) (result *extensions.PodSecurityPolicy, err error)
+ Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error)
+ List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error)
+ Delete(name string) error
+ Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// podSecurityPolicy implements PodSecurityPolicyInterface
+type podSecurityPolicy struct {
+ client *ExtensionsClient
+}
+
+// newPodSecurityPolicy returns a podSecurityPolicy object.
+func newPodSecurityPolicy(c *ExtensionsClient) *podSecurityPolicy {
+ return &podSecurityPolicy{c}
+}
+
+func (s *podSecurityPolicy) Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) {
+ result := &extensions.PodSecurityPolicy{}
+ err := s.client.Post().
+ Resource("podsecuritypolicies").
+ Body(psp).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// List returns a list of PodSecurityPolicies matching the selectors.
+func (s *podSecurityPolicy) List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) {
+ result := &extensions.PodSecurityPolicyList{}
+
+ err := s.client.Get().
+ Resource("podsecuritypolicies").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// Get returns the given PodSecurityPolicy, or an error.
+func (s *podSecurityPolicy) Get(name string) (*extensions.PodSecurityPolicy, error) {
+ result := &extensions.PodSecurityPolicy{}
+ err := s.client.Get().
+ Resource("podsecuritypolicies").
+ Name(name).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// Watch starts watching for PodSecurityPolicies matching the given selectors.
+func (s *podSecurityPolicy) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return s.client.Get().
+ Prefix("watch").
+ Resource("podsecuritypolicies").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+func (s *podSecurityPolicy) Delete(name string) error {
+ return s.client.Delete().
+ Resource("podsecuritypolicies").
+ Name(name).
+ Do().
+ Error()
+}
+
+func (s *podSecurityPolicy) Update(psp *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) {
+ result = &extensions.PodSecurityPolicy{}
+ err = s.client.Put().
+ Resource("podsecuritypolicies").
+ Name(psp.Name).
+ Body(psp).
+ Do().
+ Into(result)
+
+ return
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go
new file mode 100644
index 0000000..9a47d79
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/policy"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+type PolicyInterface interface {
+ PodDisruptionBudgetNamespacer
+}
+
+// PolicyClient is used to interact with Kubernetes policy features.
+type PolicyClient struct {
+ *restclient.RESTClient
+}
+
+func (c *PolicyClient) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface {
+ return newPodDisruptionBudget(c, namespace)
+}
+
+func NewPolicy(c *restclient.Config) (*PolicyClient, error) {
+ config := *c
+ if err := setPolicyDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &PolicyClient{client}, nil
+}
+
+func NewPolicyOrDie(c *restclient.Config) *PolicyClient {
+ client, err := NewPolicy(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setPolicyDefaults(config *restclient.Config) error {
+ g, err := registered.Group(policy.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ return nil
+}
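
Construction sketch: NewPolicy above defaults the config for the policy API group and wraps a RESTClient. A caller might wire it up as follows; the host value is a placeholder, and the blank install import is an assumption of this sketch so that the policy group is registered before setPolicyDefaults looks it up.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned"

	// Registers the policy group with the registered.Group lookup used by
	// setPolicyDefaults (assumed for this sketch).
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
)

func main() {
	cfg := &restclient.Config{Host: "http://127.0.0.1:8080"} // placeholder apiserver address
	policyClient, err := unversioned.NewPolicy(cfg)
	if err != nil {
		panic(err)
	}
	pdbs, err := policyClient.PodDisruptionBudgets("default").List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pod disruption budgets\n", len(pdbs.Items))
}
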
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go
new file mode 100644
index 0000000..09b2b40
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apimachinery/registered"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+// RbacInterface holds the methods for RBAC clients to allow mock testing.
+type RbacInterface interface {
+ RoleBindingsNamespacer
+ RolesNamespacer
+ ClusterRoleBindings
+ ClusterRoles
+}
+
+type RbacClient struct {
+ *restclient.RESTClient
+}
+
+func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface {
+ return newRoleBindings(c, namespace)
+}
+
+func (c *RbacClient) Roles(namespace string) RoleInterface {
+ return newRoles(c, namespace)
+}
+
+func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface {
+ return newClusterRoleBindings(c)
+}
+
+func (c *RbacClient) ClusterRoles() ClusterRoleInterface {
+ return newClusterRoles(c)
+}
+
+// NewRbac creates a new RbacClient for the given config.
+func NewRbac(c *restclient.Config) (*RbacClient, error) {
+ config := *c
+ if err := setRbacDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := restclient.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &RbacClient{client}, nil
+}
+
+// NewRbacOrDie creates a new RbacClient for the given config and
+// panics if there is an error in the config.
+func NewRbacOrDie(c *restclient.Config) *RbacClient {
+ client, err := NewRbac(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func setRbacDefaults(config *restclient.Config) error {
+ // if rbac group is not registered, return an error
+ g, err := registered.Group(rbac.GroupName)
+ if err != nil {
+ return err
+ }
+ config.APIPath = defaultAPIPath
+ if config.UserAgent == "" {
+ config.UserAgent = restclient.DefaultKubernetesUserAgent()
+ }
+
+ // TODO: Unconditionally set the config.Version, until we fix the config.
+ //if config.Version == "" {
+ copyGroupVersion := g.GroupVersion
+ config.GroupVersion = &copyGroupVersion
+ //}
+
+ config.NegotiatedSerializer = api.Codecs
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go
new file mode 100644
index 0000000..191a006
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ReplicaSetsNamespacer has methods to work with ReplicaSet resources in a namespace
+type ReplicaSetsNamespacer interface {
+ ReplicaSets(namespace string) ReplicaSetInterface
+}
+
+// ReplicaSetInterface has methods to work with ReplicaSet resources.
+type ReplicaSetInterface interface {
+ List(opts api.ListOptions) (*extensions.ReplicaSetList, error)
+ Get(name string) (*extensions.ReplicaSet, error)
+ Create(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error)
+ Update(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error)
+ UpdateStatus(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// replicaSets implements ReplicaSetsNamespacer interface
+type replicaSets struct {
+ client *ExtensionsClient
+ ns string
+}
+
+// newReplicaSets returns a replicaSets
+func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets {
+ return &replicaSets{c, namespace}
+}
+
+// List takes a selector, and returns the list of ReplicaSets that match that selector.
+func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) {
+ result = &extensions.ReplicaSetList{}
+ err = c.client.Get().Namespace(c.ns).Resource("replicasets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular ReplicaSet.
+func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) {
+ result = &extensions.ReplicaSet{}
+ err = c.client.Get().Namespace(c.ns).Resource("replicasets").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new ReplicaSet.
+func (c *replicaSets) Create(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+ result = &extensions.ReplicaSet{}
+ err = c.client.Post().Namespace(c.ns).Resource("replicasets").Body(rs).Do().Into(result)
+ return
+}
+
+// Update updates an existing ReplicaSet.
+func (c *replicaSets) Update(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+ result = &extensions.ReplicaSet{}
+ err = c.client.Put().Namespace(c.ns).Resource("replicasets").Name(rs.Name).Body(rs).Do().Into(result)
+ return
+}
+
+// UpdateStatus updates an existing ReplicaSet status
+func (c *replicaSets) UpdateStatus(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+ result = &extensions.ReplicaSet{}
+ err = c.client.Put().Namespace(c.ns).Resource("replicasets").Name(rs.Name).SubResource("status").Body(rs).Do().Into(result)
+ return
+}
+
+// Delete deletes an existing ReplicaSet.
+func (c *replicaSets) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.client.Delete().Namespace(c.ns).Resource("replicasets").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested ReplicaSets.
+func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("replicasets").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go
new file mode 100644
index 0000000..e4b9e2d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ReplicationControllersNamespacer has methods to work with ReplicationController resources in a namespace
+type ReplicationControllersNamespacer interface {
+ ReplicationControllers(namespace string) ReplicationControllerInterface
+}
+
+// ReplicationControllerInterface has methods to work with ReplicationController resources.
+type ReplicationControllerInterface interface {
+ List(opts api.ListOptions) (*api.ReplicationControllerList, error)
+ Get(name string) (*api.ReplicationController, error)
+ Create(ctrl *api.ReplicationController) (*api.ReplicationController, error)
+ Update(ctrl *api.ReplicationController) (*api.ReplicationController, error)
+ UpdateStatus(ctrl *api.ReplicationController) (*api.ReplicationController, error)
+ Delete(name string) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// replicationControllers implements ReplicationControllersNamespacer interface
+type replicationControllers struct {
+ r *Client
+ ns string
+}
+
+// newReplicationControllers returns a replicationControllers
+func newReplicationControllers(c *Client, namespace string) *replicationControllers {
+ return &replicationControllers{c, namespace}
+}
+
+// List takes a selector, and returns the list of replication controllers that match that selector.
+func (c *replicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) {
+ result = &api.ReplicationControllerList{}
+ err = c.r.Get().Namespace(c.ns).Resource("replicationControllers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular replication controller.
+func (c *replicationControllers) Get(name string) (result *api.ReplicationController, err error) {
+ result = &api.ReplicationController{}
+ err = c.r.Get().Namespace(c.ns).Resource("replicationControllers").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new replication controller.
+func (c *replicationControllers) Create(controller *api.ReplicationController) (result *api.ReplicationController, err error) {
+ result = &api.ReplicationController{}
+ err = c.r.Post().Namespace(c.ns).Resource("replicationControllers").Body(controller).Do().Into(result)
+ return
+}
+
+// Update updates an existing replication controller.
+func (c *replicationControllers) Update(controller *api.ReplicationController) (result *api.ReplicationController, err error) {
+ result = &api.ReplicationController{}
+ err = c.r.Put().Namespace(c.ns).Resource("replicationControllers").Name(controller.Name).Body(controller).Do().Into(result)
+ return
+}
+
+// UpdateStatus updates an existing replication controller status
+func (c *replicationControllers) UpdateStatus(controller *api.ReplicationController) (result *api.ReplicationController, err error) {
+ result = &api.ReplicationController{}
+ err = c.r.Put().Namespace(c.ns).Resource("replicationControllers").Name(controller.Name).SubResource("status").Body(controller).Do().Into(result)
+ return
+}
+
+// Delete deletes an existing replication controller.
+func (c *replicationControllers) Delete(name string) error {
+ return c.r.Delete().Namespace(c.ns).Resource("replicationControllers").Name(name).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested controllers.
+func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("replicationControllers").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go
new file mode 100644
index 0000000..9944cef
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ResourceQuotasNamespacer has methods to work with ResourceQuota resources in a namespace
+type ResourceQuotasNamespacer interface {
+ ResourceQuotas(namespace string) ResourceQuotaInterface
+}
+
+// ResourceQuotaInterface has methods to work with ResourceQuota resources.
+type ResourceQuotaInterface interface {
+ List(opts api.ListOptions) (*api.ResourceQuotaList, error)
+ Get(name string) (*api.ResourceQuota, error)
+ Delete(name string) error
+ Create(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error)
+ Update(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error)
+ UpdateStatus(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// resourceQuotas implements ResourceQuotasNamespacer interface
+type resourceQuotas struct {
+ r *Client
+ ns string
+}
+
+// newResourceQuotas returns a resourceQuotas
+func newResourceQuotas(c *Client, namespace string) *resourceQuotas {
+ return &resourceQuotas{
+ r: c,
+ ns: namespace,
+ }
+}
+
+// List takes a selector, and returns the list of resourceQuotas that match that selector.
+func (c *resourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) {
+ result = &api.ResourceQuotaList{}
+ err = c.r.Get().Namespace(c.ns).Resource("resourceQuotas").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the resourceQuota, and returns the corresponding ResourceQuota object, and an error if it occurs
+func (c *resourceQuotas) Get(name string) (result *api.ResourceQuota, err error) {
+ result = &api.ResourceQuota{}
+ err = c.r.Get().Namespace(c.ns).Resource("resourceQuotas").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the resourceQuota, and returns an error if one occurs
+func (c *resourceQuotas) Delete(name string) error {
+ return c.r.Delete().Namespace(c.ns).Resource("resourceQuotas").Name(name).Do().Error()
+}
+
+// Create takes the representation of a resourceQuota. Returns the server's representation of the resourceQuota, and an error, if it occurs.
+func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) {
+ result = &api.ResourceQuota{}
+ err = c.r.Post().Namespace(c.ns).Resource("resourceQuotas").Body(resourceQuota).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a resourceQuota to update spec. Returns the server's representation of the resourceQuota, and an error, if it occurs.
+func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) {
+ result = &api.ResourceQuota{}
+ err = c.r.Put().Namespace(c.ns).Resource("resourceQuotas").Name(resourceQuota.Name).Body(resourceQuota).Do().Into(result)
+ return
+}
+
+// UpdateStatus takes the representation of a resourceQuota to update its status. Returns the server's representation of the resourceQuota, and an error, if it occurs.
+func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) {
+ result = &api.ResourceQuota{}
+ err = c.r.Put().Namespace(c.ns).Resource("resourceQuotas").Name(resourceQuota.Name).SubResource("status").Body(resourceQuota).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested resource
+func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("resourceQuotas").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go
new file mode 100644
index 0000000..b798380
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// RoleBindingsNamespacer has methods to work with RoleBinding resources in a namespace
+type RoleBindingsNamespacer interface {
+ RoleBindings(namespace string) RoleBindingInterface
+}
+
+// RoleBindingInterface has methods to work with RoleBinding resources.
+type RoleBindingInterface interface {
+ List(opts api.ListOptions) (*rbac.RoleBindingList, error)
+ Get(name string) (*rbac.RoleBinding, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error)
+ Update(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// roleBindings implements RoleBindingsNamespacer interface
+type roleBindings struct {
+ client *RbacClient
+ ns string
+}
+
+// newRoleBindings returns a roleBindings
+func newRoleBindings(c *RbacClient, namespace string) *roleBindings {
+ return &roleBindings{
+ client: c,
+ ns: namespace,
+ }
+}
+
+// List takes label and field selectors, and returns the list of roleBindings that match those selectors.
+func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) {
+ result = &rbac.RoleBindingList{}
+ err = c.client.Get().Namespace(c.ns).Resource("rolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the roleBinding, and returns the corresponding RoleBinding object, and an error if it occurs
+func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) {
+ result = &rbac.RoleBinding{}
+ err = c.client.Get().Namespace(c.ns).Resource("rolebindings").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Namespace(c.ns).Resource("rolebindings").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if it occurs.
+func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) {
+ result = &rbac.RoleBinding{}
+ err = c.client.Post().Namespace(c.ns).Resource("rolebindings").Body(roleBinding).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if it occurs.
+func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) {
+ result = &rbac.RoleBinding{}
+ err = c.client.Put().Namespace(c.ns).Resource("rolebindings").Name(roleBinding.Name).Body(roleBinding).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("rolebindings").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go
new file mode 100644
index 0000000..b265e78
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/rbac"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// RolesNamespacer has methods to work with Role resources in a namespace
+type RolesNamespacer interface {
+ Roles(namespace string) RoleInterface
+}
+
+// RoleInterface has methods to work with Role resources.
+type RoleInterface interface {
+ List(opts api.ListOptions) (*rbac.RoleList, error)
+ Get(name string) (*rbac.Role, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Create(role *rbac.Role) (*rbac.Role, error)
+ Update(role *rbac.Role) (*rbac.Role, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// roles implements RolesNamespacer interface
+type roles struct {
+ client *RbacClient
+ ns string
+}
+
+// newRoles returns a roles
+func newRoles(c *RbacClient, namespace string) *roles {
+ return &roles{
+ client: c,
+ ns: namespace,
+ }
+}
+
+// List takes label and field selectors, and returns the list of roles that match those selectors.
+func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) {
+ result = &rbac.RoleList{}
+ err = c.client.Get().Namespace(c.ns).Resource("roles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get takes the name of the role, and returns the corresponding Role object, and an error if it occurs
+func (c *roles) Get(name string) (result *rbac.Role, err error) {
+ result = &rbac.Role{}
+ err = c.client.Get().Namespace(c.ns).Resource("roles").Name(name).Do().Into(result)
+ return
+}
+
+// Delete takes the name of the role and deletes it. Returns an error if one occurs.
+func (c *roles) Delete(name string, options *api.DeleteOptions) error {
+ return c.client.Delete().Namespace(c.ns).Resource("roles").Name(name).Body(options).Do().Error()
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if it occurs.
+func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) {
+ result = &rbac.Role{}
+ err = c.client.Post().Namespace(c.ns).Resource("roles").Body(role).Do().Into(result)
+ return
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if it occurs.
+func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) {
+ result = &rbac.Role{}
+ err = c.client.Put().Namespace(c.ns).Resource("roles").Name(role.Name).Body(role).Do().Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.client.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("roles").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go
new file mode 100644
index 0000000..a55b077
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+)
+
+type ScaleNamespacer interface {
+ Scales(namespace string) ScaleInterface
+}
+
+// ScaleInterface has methods to work with Scale (sub)resources.
+type ScaleInterface interface {
+ Get(string, string) (*extensions.Scale, error)
+ Update(string, *extensions.Scale) (*extensions.Scale, error)
+}
+
+// scales implements ScaleInterface
+type scales struct {
+ client *ExtensionsClient
+ ns string
+}
+
+// newScales returns a scales
+func newScales(c *ExtensionsClient, namespace string) *scales {
+ return &scales{
+ client: c,
+ ns: namespace,
+ }
+}
+
+// Get takes the reference to scale subresource and returns the subresource or error, if one occurs.
+func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) {
+ result = &extensions.Scale{}
+
+ // TODO this method needs to take a proper unambiguous kind
+ fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind}
+ resource, _ := meta.KindToResource(fullyQualifiedKind)
+
+ err = c.client.Get().Namespace(c.ns).Resource(resource.Resource).Name(name).SubResource("scale").Do().Into(result)
+ return
+}
+
+func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) {
+ result = &extensions.Scale{}
+
+ // TODO this method needs to take a proper unambiguous kind
+ fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind}
+ resource, _ := meta.KindToResource(fullyQualifiedKind)
+
+ err = c.client.Put().
+ Namespace(scale.Namespace).
+ Resource(resource.Resource).
+ Name(scale.Name).
+ SubResource("scale").
+ Body(scale).
+ Do().
+ Into(result)
+ return
+}
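
Usage sketch for the Scale (sub)resource client above: resize a workload by kind and name. A ScaleInterface obtained from the extensions client is assumed, and the int32 replica field follows the extensions.ScaleSpec shipped in this vendored tree.

package main

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// resize reads the scale subresource for the given kind/name, sets the desired
// replica count, and writes it back. kind is the ambiguous string the TODO in
// scale.go warns about (e.g. "ReplicaSet" or "Deployment").
func resize(scales unversioned.ScaleInterface, kind, name string, replicas int32) (*extensions.Scale, error) {
	s, err := scales.Get(kind, name)
	if err != nil {
		return nil, err
	}
	s.Spec.Replicas = replicas // assumes extensions.ScaleSpec{Replicas int32} as in this vendor copy
	return scales.Update(kind, s)
}

func main() {}
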
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go
new file mode 100644
index 0000000..de07a7e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/batch"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ScheduledJobsNamespacer has methods to work with ScheduledJob resources in a namespace
+type ScheduledJobsNamespacer interface {
+ ScheduledJobs(namespace string) ScheduledJobInterface
+}
+
+// ScheduledJobInterface exposes methods to work on ScheduledJob resources.
+type ScheduledJobInterface interface {
+ List(opts api.ListOptions) (*batch.ScheduledJobList, error)
+ Get(name string) (*batch.ScheduledJob, error)
+ Create(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error)
+ Update(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error)
+ Delete(name string, options *api.DeleteOptions) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ UpdateStatus(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error)
+}
+
+// scheduledJobs implements ScheduledJobsNamespacer interface
+type scheduledJobs struct {
+ r *BatchClient
+ ns string
+}
+
+// newScheduledJobs returns a scheduledJobs
+func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs {
+ return &scheduledJobs{c, namespace}
+}
+
+// Ensure statically that scheduledJobs implements ScheduledJobInterface.
+var _ ScheduledJobInterface = &scheduledJobs{}
+
+// List returns a list of scheduled jobs that match the label and field selectors.
+func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) {
+ result = &batch.ScheduledJobList{}
+ err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular scheduled job.
+func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) {
+ result = &batch.ScheduledJob{}
+ err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new scheduled job.
+func (c *scheduledJobs) Create(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) {
+ result = &batch.ScheduledJob{}
+ err = c.r.Post().Namespace(c.ns).Resource("scheduledJobs").Body(job).Do().Into(result)
+ return
+}
+
+// Update updates an existing scheduled job.
+func (c *scheduledJobs) Update(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) {
+ result = &batch.ScheduledJob{}
+ err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).Body(job).Do().Into(result)
+ return
+}
+
+// Delete deletes a scheduled job, returns error if one occurs.
+func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) (err error) {
+ return c.r.Delete().Namespace(c.ns).Resource("scheduledJobs").Name(name).Body(options).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested scheduled jobs.
+func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("scheduledJobs").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// UpdateStatus takes the name of the scheduled job and the new status. Returns the server's representation of the scheduled job, and an error, if it occurs.
+func (c *scheduledJobs) UpdateStatus(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) {
+ result = &batch.ScheduledJob{}
+ err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result)
+ return
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go
new file mode 100644
index 0000000..bba3fd9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+type SecretsNamespacer interface {
+ Secrets(namespace string) SecretsInterface
+}
+
+type SecretsInterface interface {
+ Create(secret *api.Secret) (*api.Secret, error)
+ Update(secret *api.Secret) (*api.Secret, error)
+ Delete(name string) error
+ List(opts api.ListOptions) (*api.SecretList, error)
+ Get(name string) (*api.Secret, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// secrets implements SecretsInterface
+type secrets struct {
+ client *Client
+ namespace string
+}
+
+// newSecrets returns a new secrets object.
+func newSecrets(c *Client, ns string) *secrets {
+ return &secrets{
+ client: c,
+ namespace: ns,
+ }
+}
+
+func (s *secrets) Create(secret *api.Secret) (*api.Secret, error) {
+ result := &api.Secret{}
+ err := s.client.Post().
+ Namespace(s.namespace).
+ Resource("secrets").
+ Body(secret).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// List returns a list of secrets matching the selectors.
+func (s *secrets) List(opts api.ListOptions) (*api.SecretList, error) {
+ result := &api.SecretList{}
+
+ err := s.client.Get().
+ Namespace(s.namespace).
+ Resource("secrets").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// Get returns the given secret, or an error.
+func (s *secrets) Get(name string) (*api.Secret, error) {
+ result := &api.Secret{}
+ err := s.client.Get().
+ Namespace(s.namespace).
+ Resource("secrets").
+ Name(name).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// Watch starts watching for secrets matching the given selectors.
+func (s *secrets) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return s.client.Get().
+ Prefix("watch").
+ Namespace(s.namespace).
+ Resource("secrets").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+func (s *secrets) Delete(name string) error {
+ return s.client.Delete().
+ Namespace(s.namespace).
+ Resource("secrets").
+ Name(name).
+ Do().
+ Error()
+}
+
+func (s *secrets) Update(secret *api.Secret) (result *api.Secret, err error) {
+ result = &api.Secret{}
+ err = s.client.Put().
+ Namespace(s.namespace).
+ Resource("secrets").
+ Name(secret.Name).
+ Body(secret).
+ Do().
+ Into(result)
+
+ return
+}
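
Usage sketch for the SecretsInterface above: creating an Opaque secret. The SecretsInterface is assumed to be already scoped to a namespace (see newSecrets); the secret name and data key are placeholders.

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// createToken stores a single key under an Opaque secret via the Create call shown above.
func createToken(secrets unversioned.SecretsInterface, name string, token []byte) (*api.Secret, error) {
	return secrets.Create(&api.Secret{
		ObjectMeta: api.ObjectMeta{Name: name},
		Type:       api.SecretTypeOpaque,
		Data:       map[string][]byte{"token": token}, // illustrative key
	})
}

func main() {}
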
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go
new file mode 100644
index 0000000..68d1b21
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+type ServiceAccountsNamespacer interface {
+ ServiceAccounts(namespace string) ServiceAccountsInterface
+}
+
+type ServiceAccountsInterface interface {
+ Create(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error)
+ Update(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error)
+ Delete(name string) error
+ List(opts api.ListOptions) (*api.ServiceAccountList, error)
+ Get(name string) (*api.ServiceAccount, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// serviceAccounts implements ServiceAccounts interface
+type serviceAccounts struct {
+ client *Client
+ namespace string
+}
+
+// newServiceAccounts returns a new serviceAccounts object.
+func newServiceAccounts(c *Client, ns string) ServiceAccountsInterface {
+ return &serviceAccounts{
+ client: c,
+ namespace: ns,
+ }
+}
+
+func (s *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) {
+ result := &api.ServiceAccount{}
+ err := s.client.Post().
+ Namespace(s.namespace).
+ Resource("serviceAccounts").
+ Body(serviceAccount).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// List returns a list of serviceAccounts matching the selectors.
+func (s *serviceAccounts) List(opts api.ListOptions) (*api.ServiceAccountList, error) {
+ result := &api.ServiceAccountList{}
+
+ err := s.client.Get().
+ Namespace(s.namespace).
+ Resource("serviceAccounts").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// Get returns the given serviceAccount, or an error.
+func (s *serviceAccounts) Get(name string) (*api.ServiceAccount, error) {
+ result := &api.ServiceAccount{}
+ err := s.client.Get().
+ Namespace(s.namespace).
+ Resource("serviceAccounts").
+ Name(name).
+ Do().
+ Into(result)
+
+ return result, err
+}
+
+// Watch starts watching for serviceAccounts matching the given selectors.
+func (s *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return s.client.Get().
+ Prefix("watch").
+ Namespace(s.namespace).
+ Resource("serviceAccounts").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+func (s *serviceAccounts) Delete(name string) error {
+ return s.client.Delete().
+ Namespace(s.namespace).
+ Resource("serviceAccounts").
+ Name(name).
+ Do().
+ Error()
+}
+
+func (s *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) {
+ result = &api.ServiceAccount{}
+ err = s.client.Put().
+ Namespace(s.namespace).
+ Resource("serviceAccounts").
+ Name(serviceAccount.Name).
+ Body(serviceAccount).
+ Do().
+ Into(result)
+
+ return
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go
new file mode 100644
index 0000000..aada5c1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/util/net"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ServicesNamespacer has methods to work with Service resources in a namespace
+type ServicesNamespacer interface {
+ Services(namespace string) ServiceInterface
+}
+
+// ServiceInterface has methods to work with Service resources.
+type ServiceInterface interface {
+ List(opts api.ListOptions) (*api.ServiceList, error)
+ Get(name string) (*api.Service, error)
+ Create(srv *api.Service) (*api.Service, error)
+ Update(srv *api.Service) (*api.Service, error)
+ UpdateStatus(srv *api.Service) (*api.Service, error)
+ Delete(name string) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper
+}
+
+// services implements ServicesNamespacer interface
+type services struct {
+ r *Client
+ ns string
+}
+
+// newServices returns a services
+func newServices(c *Client, namespace string) *services {
+ return &services{c, namespace}
+}
+
+// List takes a selector, and returns the list of services that match that selector
+func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) {
+ result = &api.ServiceList{}
+ err = c.r.Get().
+ Namespace(c.ns).
+ Resource("services").
+ VersionedParams(&opts, api.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Get returns information about a particular service.
+func (c *services) Get(name string) (result *api.Service, err error) {
+ result = &api.Service{}
+ err = c.r.Get().Namespace(c.ns).Resource("services").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new service.
+func (c *services) Create(svc *api.Service) (result *api.Service, err error) {
+ result = &api.Service{}
+ err = c.r.Post().Namespace(c.ns).Resource("services").Body(svc).Do().Into(result)
+ return
+}
+
+// Update updates an existing service.
+func (c *services) Update(svc *api.Service) (result *api.Service, err error) {
+ result = &api.Service{}
+ err = c.r.Put().Namespace(c.ns).Resource("services").Name(svc.Name).Body(svc).Do().Into(result)
+ return
+}
+
+// UpdateStatus takes a Service object with the new status and applies it as an update to the existing Service.
+func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) {
+ result = &api.Service{}
+ err = c.r.Put().Namespace(c.ns).Resource("services").Name(service.Name).SubResource("status").Body(service).Do().Into(result)
+ return
+}
+
+// Delete deletes an existing service.
+func (c *services) Delete(name string) error {
+ return c.r.Delete().Namespace(c.ns).Resource("services").Name(name).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested services.
+func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Namespace(c.ns).
+ Resource("services").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
+
+// ProxyGet returns a request for the service's response, proxied through the API server.
+func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
+ request := c.r.Get().
+ Namespace(c.ns).
+ Resource("services").
+ SubResource("proxy").
+ Name(net.JoinSchemeNamePort(scheme, name, port)).
+ Suffix(path)
+ for k, v := range params {
+ request = request.Param(k, v)
+ }
+ return request
+}
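
Usage sketch for the ProxyGet helper above, which addresses a service as scheme:name:port through the API server proxy and returns a restclient.ResponseWrapper. The service name, port, and path are placeholders.

package main

import (
	"k8s.io/kubernetes/pkg/client/unversioned"
)

// probe fetches /healthz from a service via the apiserver proxy and returns the body.
// The ServiceInterface is supplied by the caller.
func probe(svcs unversioned.ServiceInterface, name, port string) (string, error) {
	body, err := svcs.ProxyGet("http", name, port, "/healthz", nil).DoRaw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {}
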
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go
new file mode 100644
index 0000000..68adddb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// ThirdPartyResourceNamespacer has methods to work with ThirdPartyResource resources in a namespace
+type ThirdPartyResourceNamespacer interface {
+ ThirdPartyResources() ThirdPartyResourceInterface
+}
+
+type ThirdPartyResourceInterface interface {
+ List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error)
+ Get(name string) (*extensions.ThirdPartyResource, error)
+ Create(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error)
+ Update(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error)
+ UpdateStatus(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error)
+ Delete(name string) error
+ Watch(opts api.ListOptions) (watch.Interface, error)
+}
+
+// thirdPartyResources implements ThirdPartyResourceNamespacer interface
+type thirdPartyResources struct {
+ r *ExtensionsClient
+}
+
+func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources {
+ return &thirdPartyResources{c}
+}
+
+// Ensure statically that thirdPartyResources implements ThirdPartyResourceInterface.
+var _ ThirdPartyResourceInterface = &thirdPartyResources{}
+
+func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) {
+ result = &extensions.ThirdPartyResourceList{}
+ err = c.r.Get().Resource("thirdpartyresources").VersionedParams(&opts, api.ParameterCodec).Do().Into(result)
+ return
+}
+
+// Get returns information about a particular third party resource.
+func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) {
+ result = &extensions.ThirdPartyResource{}
+ err = c.r.Get().Resource("thirdpartyresources").Name(name).Do().Into(result)
+ return
+}
+
+// Create creates a new third party resource.
+func (c *thirdPartyResources) Create(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) {
+ result = &extensions.ThirdPartyResource{}
+ err = c.r.Post().Resource("thirdpartyresources").Body(resource).Do().Into(result)
+ return
+}
+
+// Update updates an existing third party resource.
+func (c *thirdPartyResources) Update(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) {
+ result = &extensions.ThirdPartyResource{}
+ err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).Body(resource).Do().Into(result)
+ return
+}
+
+// UpdateStatus updates an existing third party resource status
+func (c *thirdPartyResources) UpdateStatus(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) {
+ result = &extensions.ThirdPartyResource{}
+ err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).SubResource("status").Body(resource).Do().Into(result)
+ return
+}
+
+// Delete deletes an existing third party resource.
+func (c *thirdPartyResources) Delete(name string) error {
+ return c.r.Delete().Resource("thirdpartyresources").Name(name).Do().Error()
+}
+
+// Watch returns a watch.Interface that watches the requested third party resources.
+func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) {
+ return c.r.Get().
+ Prefix("watch").
+ Resource("thirdpartyresources").
+ VersionedParams(&opts, api.ParameterCodec).
+ Watch()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go
new file mode 100644
index 0000000..9657ff2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+ "time"
+
+ "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/util/wait"
+)
+
+// DefaultRetry is the recommended retry for a conflict where multiple clients
+// are making changes to the same resource.
+var DefaultRetry = wait.Backoff{
+ Steps: 5,
+ Duration: 10 * time.Millisecond,
+ Factor: 1.0,
+ Jitter: 0.1,
+}
+
+// DefaultBackoff is the recommended backoff for a conflict where a client
+// may be attempting to make an unrelated modification to a resource under
+// active management by one or more controllers.
+var DefaultBackoff = wait.Backoff{
+ Steps: 4,
+ Duration: 10 * time.Millisecond,
+ Factor: 5.0,
+ Jitter: 0.1,
+}
+
+// RetryOnConflict executes the provided function repeatedly, retrying if the server returns a conflicting
+// write. Callers should preserve previous executions if they wish to retry changes. It performs an
+// exponential backoff.
+//
+// var pod *api.Pod
+// err := RetryOnConflict(DefaultBackoff, func() (err error) {
+// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus)
+// return
+// })
+// if err != nil {
+// // may be conflict if max retries were hit
+// return err
+// }
+// ...
+//
+// TODO: Make Backoff an interface?
+func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
+ var lastConflictErr error
+ err := wait.ExponentialBackoff(backoff, func() (bool, error) {
+ err := fn()
+ switch {
+ case err == nil:
+ return true, nil
+ case errors.IsConflict(err):
+ lastConflictErr = err
+ return false, nil
+ default:
+ return false, err
+ }
+ })
+ if err == wait.ErrWaitTimeout {
+ err = lastConflictErr
+ }
+ return err
+}
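
The Backoff values above are plain structs, so callers are not limited to DefaultRetry or DefaultBackoff. A hedged sketch of a caller-defined policy, with updateFn standing in for whatever write might hit a conflict:

// exampleCustomRetry retries updateFn on conflicts with a tighter policy than
// DefaultBackoff. Editorial sketch; updateFn is a hypothetical caller-supplied closure.
func exampleCustomRetry(updateFn func() error) error {
	quick := wait.Backoff{
		Steps:    3,                    // give up after three attempts
		Duration: 5 * time.Millisecond, // initial delay between attempts
		Factor:   2.0,                  // double the delay each step
		Jitter:   0.1,                  // +/- 10% randomization
	}
	return RetryOnConflict(quick, updateFn)
}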
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go
new file mode 100644
index 0000000..8cbd124
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go
@@ -0,0 +1,326 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "sync"
+ "time"
+
+ "k8s.io/kubernetes/pkg/client/cache"
+ "k8s.io/kubernetes/pkg/runtime"
+ utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+ "k8s.io/kubernetes/pkg/util/wait"
+)
+
+// Config contains all the settings for a Controller.
+type Config struct {
+ // The queue for your objects; either a cache.FIFO or
+ // a cache.DeltaFIFO. Your Process() function should accept
+ // the output of this Queue's Pop() method.
+ cache.Queue
+
+ // Something that can list and watch your objects.
+ cache.ListerWatcher
+
+ // Something that can process your objects.
+ Process ProcessFunc
+
+ // The type of your objects.
+ ObjectType runtime.Object
+
+ // Reprocess everything at least this often.
+ // Note that if it takes longer for you to clear the queue than this
+ // period, you will end up processing items in the order determined
+ // by cache.FIFO.Replace(). Currently, this is random. If this is a
+ // problem, we can change that replacement policy to append new
+ // things to the end of the queue instead of replacing the entire
+ // queue.
+ FullResyncPeriod time.Duration
+
+ // If true, when Process() returns an error, re-enqueue the object.
+ // TODO: add interface to let you inject a delay/backoff or drop
+ // the object completely if desired. Pass the object in
+ // question to this interface as a parameter.
+ RetryOnError bool
+}
+
+// ProcessFunc processes a single object.
+type ProcessFunc func(obj interface{}) error
+
+// Controller is a generic controller framework.
+type Controller struct {
+ config Config
+ reflector *cache.Reflector
+ reflectorMutex sync.RWMutex
+}
+
+// TODO make the "Controller" private, and convert all references to use ControllerInterface instead
+type ControllerInterface interface {
+ Run(stopCh <-chan struct{})
+ HasSynced() bool
+}
+
+// New makes a new Controller from the given Config.
+func New(c *Config) *Controller {
+ ctlr := &Controller{
+ config: *c,
+ }
+ return ctlr
+}
+
+// Run begins processing items, and will continue until a value is sent down stopCh.
+// It's an error to call Run more than once.
+// Run blocks; call via go.
+func (c *Controller) Run(stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrash()
+ r := cache.NewReflector(
+ c.config.ListerWatcher,
+ c.config.ObjectType,
+ c.config.Queue,
+ c.config.FullResyncPeriod,
+ )
+
+ c.reflectorMutex.Lock()
+ c.reflector = r
+ c.reflectorMutex.Unlock()
+
+ r.RunUntil(stopCh)
+
+ wait.Until(c.processLoop, time.Second, stopCh)
+}
+
+// HasSynced returns true once this controller has completed an initial resource listing.
+func (c *Controller) HasSynced() bool {
+ return c.config.Queue.HasSynced()
+}
+
+// Requeue adds the provided object back into the queue if it does not already exist.
+func (c *Controller) Requeue(obj interface{}) error {
+ return c.config.Queue.AddIfNotPresent(cache.Deltas{
+ cache.Delta{
+ Type: cache.Sync,
+ Object: obj,
+ },
+ })
+}
+
+// processLoop drains the work queue.
+// TODO: Consider doing the processing in parallel. This will require a little thought
+// to make sure that we don't end up processing the same object multiple times
+// concurrently.
+func (c *Controller) processLoop() {
+ for {
+ obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process))
+ if err != nil {
+ if c.config.RetryOnError {
+ // This is the safe way to re-enqueue.
+ c.config.Queue.AddIfNotPresent(obj)
+ }
+ }
+ }
+}
+
+// ResourceEventHandler can handle notifications for events that happen to a
+// resource. The events are informational only, so you can't return an
+// error.
+// * OnAdd is called when an object is added.
+// * OnUpdate is called when an object is modified. Note that oldObj is the
+// last known state of the object-- it is possible that several changes
+// were combined together, so you can't use this to see every single
+// change. OnUpdate is also called when a re-list happens, and it will
+// get called even if nothing changed. This is useful for periodically
+// evaluating or syncing something.
+// * OnDelete will get the final state of the item if it is known, otherwise
+// it will get an object of type cache.DeletedFinalStateUnknown. This can
+// happen if the watch is closed and misses the delete event and we don't
+// notice the deletion until the subsequent re-list.
+type ResourceEventHandler interface {
+ OnAdd(obj interface{})
+ OnUpdate(oldObj, newObj interface{})
+ OnDelete(obj interface{})
+}
+
+// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
+// as few of the notification functions as you want while still implementing
+// ResourceEventHandler.
+type ResourceEventHandlerFuncs struct {
+ AddFunc func(obj interface{})
+ UpdateFunc func(oldObj, newObj interface{})
+ DeleteFunc func(obj interface{})
+}
+
+// OnAdd calls AddFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
+ if r.AddFunc != nil {
+ r.AddFunc(obj)
+ }
+}
+
+// OnUpdate calls UpdateFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
+ if r.UpdateFunc != nil {
+ r.UpdateFunc(oldObj, newObj)
+ }
+}
+
+// OnDelete calls DeleteFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
+ if r.DeleteFunc != nil {
+ r.DeleteFunc(obj)
+ }
+}
+
+// DeletionHandlingMetaNamespaceKeyFunc checks for
+// cache.DeletedFinalStateUnknown objects before calling
+// cache.MetaNamespaceKeyFunc.
+func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
+ if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ return d.Key, nil
+ }
+ return cache.MetaNamespaceKeyFunc(obj)
+}
+
+// NewInformer returns a cache.Store and a controller for populating the store
+// while also providing event notifications. You should only use the returned
+// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+// * lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// * objType is an object of the type that you expect to receive.
+// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// * h is the object you want notifications sent to.
+//
+func NewInformer(
+ lw cache.ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+) (cache.Store, *Controller) {
+ // This will hold the client state, as we know it.
+ clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+ // This will hold incoming changes. Note how we pass clientState in as a
+ // KeyLister, that way resync operations will result in the correct set
+ // of update/delete deltas.
+ fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
+
+ cfg := &Config{
+ Queue: fifo,
+ ListerWatcher: lw,
+ ObjectType: objType,
+ FullResyncPeriod: resyncPeriod,
+ RetryOnError: false,
+
+ Process: func(obj interface{}) error {
+ // from oldest to newest
+ for _, d := range obj.(cache.Deltas) {
+ switch d.Type {
+ case cache.Sync, cache.Added, cache.Updated:
+ if old, exists, err := clientState.Get(d.Object); err == nil && exists {
+ if err := clientState.Update(d.Object); err != nil {
+ return err
+ }
+ h.OnUpdate(old, d.Object)
+ } else {
+ if err := clientState.Add(d.Object); err != nil {
+ return err
+ }
+ h.OnAdd(d.Object)
+ }
+ case cache.Deleted:
+ if err := clientState.Delete(d.Object); err != nil {
+ return err
+ }
+ h.OnDelete(d.Object)
+ }
+ }
+ return nil
+ },
+ }
+ return clientState, New(cfg)
+}
+
+// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
+// while also providing event notifications. You should only use the returned
+// cache.Indexer for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+// * lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// * objType is an object of the type that you expect to receive.
+// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// * h is the object you want notifications sent to.
+//
+func NewIndexerInformer(
+ lw cache.ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+ indexers cache.Indexers,
+) (cache.Indexer, *Controller) {
+ // This will hold the client state, as we know it.
+ clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+ // This will hold incoming changes. Note how we pass clientState in as a
+ // KeyLister, that way resync operations will result in the correct set
+ // of update/delete deltas.
+ fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
+
+ cfg := &Config{
+ Queue: fifo,
+ ListerWatcher: lw,
+ ObjectType: objType,
+ FullResyncPeriod: resyncPeriod,
+ RetryOnError: false,
+
+ Process: func(obj interface{}) error {
+ // from oldest to newest
+ for _, d := range obj.(cache.Deltas) {
+ switch d.Type {
+ case cache.Sync, cache.Added, cache.Updated:
+ if old, exists, err := clientState.Get(d.Object); err == nil && exists {
+ if err := clientState.Update(d.Object); err != nil {
+ return err
+ }
+ h.OnUpdate(old, d.Object)
+ } else {
+ if err := clientState.Add(d.Object); err != nil {
+ return err
+ }
+ h.OnAdd(d.Object)
+ }
+ case cache.Deleted:
+ if err := clientState.Delete(d.Object); err != nil {
+ return err
+ }
+ h.OnDelete(d.Object)
+ }
+ }
+ return nil
+ },
+ }
+ return clientState, New(cfg)
+}
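
A hedged sketch of how NewInformer and ResourceEventHandlerFuncs above are typically wired together. cache.NewListWatchFromClient, cache.Getter and fields.Everything are assumed to come from the companion cache and fields packages of this vendoring, and the fmt/time imports are assumed.

// examplePodInformer starts an informer over pods and logs add/delete events.
// Editorial sketch; c is any configured cache.Getter (e.g. a REST client).
func examplePodInformer(c cache.Getter, stopCh <-chan struct{}) cache.Store {
	lw := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.Everything())
	store, controller := NewInformer(
		lw,
		&api.Pod{},
		30*time.Second, // resync period; 0 disables the periodic re-list
		ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { fmt.Printf("added: %T\n", obj) },
			DeleteFunc: func(obj interface{}) { fmt.Printf("deleted: %T\n", obj) },
		},
	)
	go controller.Run(stopCh)
	// The returned store is intended for Get/List only, once HasSynced is true.
	return store
}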
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go
new file mode 100644
index 0000000..feceba3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package framework implements all the grunt work involved in running a simple controller.
+package framework
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go
new file mode 100644
index 0000000..ee00c05
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "errors"
+ "math/rand"
+ "strconv"
+ "sync"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/meta"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+func NewFakeControllerSource() *FakeControllerSource {
+ return &FakeControllerSource{
+ Items: map[nnu]runtime.Object{},
+ Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
+ }
+}
+
+func NewFakePVControllerSource() *FakePVControllerSource {
+ return &FakePVControllerSource{
+ FakeControllerSource{
+ Items: map[nnu]runtime.Object{},
+ Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
+ }}
+}
+
+func NewFakePVCControllerSource() *FakePVCControllerSource {
+ return &FakePVCControllerSource{
+ FakeControllerSource{
+ Items: map[nnu]runtime.Object{},
+ Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
+ }}
+}
+
+// FakeControllerSource implements listing/watching for testing.
+type FakeControllerSource struct {
+ lock sync.RWMutex
+ Items map[nnu]runtime.Object
+ changes []watch.Event // one change per resourceVersion
+ Broadcaster *watch.Broadcaster
+}
+
+type FakePVControllerSource struct {
+ FakeControllerSource
+}
+
+type FakePVCControllerSource struct {
+ FakeControllerSource
+}
+
+// namespace, name, uid to be used as a key.
+type nnu struct {
+ namespace, name string
+ uid types.UID
+}
+
+// Add adds an object to the set and sends an add event to watchers.
+// obj's ResourceVersion is set.
+func (f *FakeControllerSource) Add(obj runtime.Object) {
+ f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
+}
+
+// Modify updates an object in the set and sends a modified event to watchers.
+// obj's ResourceVersion is set.
+func (f *FakeControllerSource) Modify(obj runtime.Object) {
+ f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
+}
+
+// Delete deletes an object from the set and sends a delete event to watchers.
+// obj's ResourceVersion is set.
+func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
+ f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
+}
+
+// AddDropWatch adds an object to the set but forgets to send an add event to
+// watchers.
+// obj's ResourceVersion is set.
+func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
+ f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
+}
+
+// ModifyDropWatch updates an object in the set but forgets to send a modify
+// event to watchers.
+// obj's ResourceVersion is set.
+func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
+ f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
+}
+
+// DeleteDropWatch deletes an object from the set but forgets to send a delete
+// event to watchers.
+// obj's ResourceVersion is set.
+func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
+ f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
+}
+
+func (f *FakeControllerSource) key(accessor meta.Object) nnu {
+ return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
+}
+
+// Change records the given event (setting the object's resource version) and
+// sends a watch event with the specified probability.
+func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ accessor, err := meta.Accessor(e.Object)
+ if err != nil {
+ panic(err) // this is test code only
+ }
+
+ resourceVersion := len(f.changes) + 1
+ accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
+ f.changes = append(f.changes, e)
+ key := f.key(accessor)
+ switch e.Type {
+ case watch.Added, watch.Modified:
+ f.Items[key] = e.Object
+ case watch.Deleted:
+ delete(f.Items, key)
+ }
+
+ if rand.Float64() < watchProbability {
+ f.Broadcaster.Action(e.Type, e.Object)
+ }
+}
+
+func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
+ list := make([]runtime.Object, 0, len(f.Items))
+ for _, obj := range f.Items {
+ // Must make a copy to allow clients to modify the object.
+ // Otherwise, if they make a change and write it back, they
+ // will inadvertently change our canonical copy (in
+ // addition to racing with other clients).
+ objCopy, err := api.Scheme.DeepCopy(obj)
+ if err != nil {
+ return nil, err
+ }
+ list = append(list, objCopy.(runtime.Object))
+ }
+ return list, nil
+}
+
+// List returns a list object, with its resource version set.
+func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ list, err := f.getListItemsLocked()
+ if err != nil {
+ return nil, err
+ }
+ listObj := &api.List{}
+ if err := meta.SetList(listObj, list); err != nil {
+ return nil, err
+ }
+ objMeta, err := api.ListMetaFor(listObj)
+ if err != nil {
+ return nil, err
+ }
+ resourceVersion := len(f.changes)
+ objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
+ return listObj, nil
+}
+
+// List returns a list object, with its resource version set.
+func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ list, err := f.FakeControllerSource.getListItemsLocked()
+ if err != nil {
+ return nil, err
+ }
+ listObj := &api.PersistentVolumeList{}
+ if err := meta.SetList(listObj, list); err != nil {
+ return nil, err
+ }
+ objMeta, err := api.ListMetaFor(listObj)
+ if err != nil {
+ return nil, err
+ }
+ resourceVersion := len(f.changes)
+ objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
+ return listObj, nil
+}
+
+// List returns a list object, with its resource version set.
+func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ list, err := f.FakeControllerSource.getListItemsLocked()
+ if err != nil {
+ return nil, err
+ }
+ listObj := &api.PersistentVolumeClaimList{}
+ if err := meta.SetList(listObj, list); err != nil {
+ return nil, err
+ }
+ objMeta, err := api.ListMetaFor(listObj)
+ if err != nil {
+ return nil, err
+ }
+ resourceVersion := len(f.changes)
+ objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
+ return listObj, nil
+}
+
+// Watch returns a watch, which will be pre-populated with all changes
+// after resourceVersion.
+func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ rc, err := strconv.Atoi(options.ResourceVersion)
+ if err != nil {
+ return nil, err
+ }
+ if rc < len(f.changes) {
+ changes := []watch.Event{}
+ for _, c := range f.changes[rc:] {
+ // Must make a copy to allow clients to modify the
+ // object. Otherwise, if they make a change and write
+ // it back, they will inadvertently change our
+ // canonical copy (in addition to racing with other
+ // clients).
+ objCopy, err := api.Scheme.DeepCopy(c.Object)
+ if err != nil {
+ return nil, err
+ }
+ changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
+ }
+ return f.Broadcaster.WatchWithPrefix(changes), nil
+ } else if rc > len(f.changes) {
+ return nil, errors.New("resource version in the future not supported by this fake")
+ }
+ return f.Broadcaster.Watch(), nil
+}
+
+// Shutdown closes the underlying broadcaster, waiting for events to be
+// delivered. It's an error to call any method after calling shutdown. This is
+// enforced by Shutdown() leaving f locked.
+func (f *FakeControllerSource) Shutdown() {
+ f.lock.Lock() // Purposely no unlock.
+ f.Broadcaster.Shutdown()
+}
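
A short test-style sketch of driving the fake source above; the api.Pod/api.ObjectMeta types and the fmt and watch imports are assumed from the same vendored tree.

// exampleFakeSource seeds the fake with one object, lists it, and replays the
// recorded change through a watch. Editorial sketch for test code.
func exampleFakeSource() error {
	source := NewFakeControllerSource()
	defer source.Shutdown()

	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "pod-1"}})

	if _, err := source.List(api.ListOptions{}); err != nil {
		return err
	}

	// Resource version "0" is before the first recorded change, so the Add
	// event above is replayed to the watcher.
	w, err := source.Watch(api.ListOptions{ResourceVersion: "0"})
	if err != nil {
		return err
	}
	defer w.Stop()

	event := <-w.ResultChan()
	if event.Type != watch.Added {
		return fmt.Errorf("unexpected event: %v", event.Type)
	}
	return nil
}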
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go
new file mode 100644
index 0000000..87dcac6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go
@@ -0,0 +1,383 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "k8s.io/kubernetes/pkg/client/cache"
+ "k8s.io/kubernetes/pkg/runtime"
+ utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+)
+
+// If you use this, there is one behavior change compared to a standard Informer.
+// When you receive a notification, the cache will be AT LEAST as fresh as the
+// notification, but it MAY be more fresh. You should NOT depend on the contents
+// of the cache exactly matching the notification you've received in handler
+// functions. If there was a create, followed by a delete, the cache may NOT
+// have your item. This has advantages over the broadcaster since it allows us
+// to share a common cache across many controllers. Extending the broadcaster
+// would have required us to keep duplicate caches for each watch.
+type SharedInformer interface {
+ // AddEventHandler adds an event handler. Events to a single handler are delivered sequentially,
+ // but there is no coordination between different handlers. A handler added after the
+ // SharedInformer is running is joined by replaying the current store contents to it (see the
+ // implementation below), rather than returning an error.
+ AddEventHandler(handler ResourceEventHandler) error
+ GetStore() cache.Store
+ // GetController gives back a synthetic interface that "votes" to start the informer
+ GetController() ControllerInterface
+ Run(stopCh <-chan struct{})
+ HasSynced() bool
+ LastSyncResourceVersion() string
+}
+
+type SharedIndexInformer interface {
+ SharedInformer
+ // AddIndexers add indexers to the informer before it starts.
+ AddIndexers(indexers cache.Indexers) error
+ GetIndexer() cache.Indexer
+}
+
+// NewSharedInformer creates a new instance for the listwatcher.
+// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
+// be shared amongst all consumers.
+func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
+ return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{})
+}
+
+// NewSharedIndexInformer creates a new instance for the listwatcher.
+// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
+// be shared amongst all consumers.
+func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
+ sharedIndexInformer := &sharedIndexInformer{
+ processor: &sharedProcessor{},
+ indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
+ listerWatcher: lw,
+ objectType: objType,
+ fullResyncPeriod: resyncPeriod,
+ }
+ return sharedIndexInformer
+}
+
+type sharedIndexInformer struct {
+ indexer cache.Indexer
+ controller *Controller
+
+ processor *sharedProcessor
+
+ // This block is tracked to handle late initialization of the controller
+ listerWatcher cache.ListerWatcher
+ objectType runtime.Object
+ fullResyncPeriod time.Duration
+
+ started bool
+ startedLock sync.Mutex
+
+ // blockDeltas gives a way to stop all event distribution so that a late event handler
+ // can safely join the shared informer.
+ blockDeltas sync.Mutex
+ // stopCh is the channel used to stop the main Run process. We have to track it so that
+ // late joiners can have a proper stop
+ stopCh <-chan struct{}
+}
+
+// dummyController hides the fact that a SharedInformer is different from a dedicated one
+// where a caller can `Run`. The run method is disconnected in this case, because higher
+// level logic will decide when to start the SharedInformer and related controller.
+// Because returning information back is always asynchronous, the legacy callers shouldn't
+// notice any change in behavior.
+type dummyController struct {
+ informer *sharedIndexInformer
+}
+
+func (v *dummyController) Run(stopCh <-chan struct{}) {
+}
+
+func (v *dummyController) HasSynced() bool {
+ return v.informer.HasSynced()
+}
+
+type updateNotification struct {
+ oldObj interface{}
+ newObj interface{}
+}
+
+type addNotification struct {
+ newObj interface{}
+}
+
+type deleteNotification struct {
+ oldObj interface{}
+}
+
+func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrash()
+
+ fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)
+
+ cfg := &Config{
+ Queue: fifo,
+ ListerWatcher: s.listerWatcher,
+ ObjectType: s.objectType,
+ FullResyncPeriod: s.fullResyncPeriod,
+ RetryOnError: false,
+
+ Process: s.HandleDeltas,
+ }
+
+ func() {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+
+ s.controller = New(cfg)
+ s.started = true
+ }()
+
+ s.stopCh = stopCh
+ s.processor.run(stopCh)
+ s.controller.Run(stopCh)
+}
+
+func (s *sharedIndexInformer) isStarted() bool {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+ return s.started
+}
+
+func (s *sharedIndexInformer) HasSynced() bool {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+
+ if s.controller == nil {
+ return false
+ }
+ return s.controller.HasSynced()
+}
+
+func (s *sharedIndexInformer) LastSyncResourceVersion() string {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+
+ if s.controller == nil {
+ return ""
+ }
+ return s.controller.reflector.LastSyncResourceVersion()
+}
+
+func (s *sharedIndexInformer) GetStore() cache.Store {
+ return s.indexer
+}
+
+func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
+ return s.indexer
+}
+
+func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+
+ if s.started {
+ return fmt.Errorf("informer has already started")
+ }
+
+ return s.indexer.AddIndexers(indexers)
+}
+
+func (s *sharedIndexInformer) GetController() ControllerInterface {
+ return &dummyController{informer: s}
+}
+
+func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
+ s.startedLock.Lock()
+ defer s.startedLock.Unlock()
+
+ if !s.started {
+ listener := newProcessListener(handler)
+ s.processor.listeners = append(s.processor.listeners, listener)
+ return nil
+ }
+
+ // in order to safely join, we have to
+ // 1. stop sending add/update/delete notifications
+ // 2. do a list against the store
+ // 3. send synthetic "Add" events to the new handler
+ // 4. unblock
+ s.blockDeltas.Lock()
+ defer s.blockDeltas.Unlock()
+
+ listener := newProcessListener(handler)
+ s.processor.listeners = append(s.processor.listeners, listener)
+
+ go listener.run(s.stopCh)
+ go listener.pop(s.stopCh)
+
+ items := s.indexer.List()
+ for i := range items {
+ listener.add(addNotification{newObj: items[i]})
+ }
+
+ return nil
+}
+
+func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
+ s.blockDeltas.Lock()
+ defer s.blockDeltas.Unlock()
+
+ // from oldest to newest
+ for _, d := range obj.(cache.Deltas) {
+ switch d.Type {
+ case cache.Sync, cache.Added, cache.Updated:
+ if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
+ if err := s.indexer.Update(d.Object); err != nil {
+ return err
+ }
+ s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object})
+ } else {
+ if err := s.indexer.Add(d.Object); err != nil {
+ return err
+ }
+ s.processor.distribute(addNotification{newObj: d.Object})
+ }
+ case cache.Deleted:
+ if err := s.indexer.Delete(d.Object); err != nil {
+ return err
+ }
+ s.processor.distribute(deleteNotification{oldObj: d.Object})
+ }
+ }
+ return nil
+}
+
+type sharedProcessor struct {
+ listeners []*processorListener
+}
+
+func (p *sharedProcessor) distribute(obj interface{}) {
+ for _, listener := range p.listeners {
+ listener.add(obj)
+ }
+}
+
+func (p *sharedProcessor) run(stopCh <-chan struct{}) {
+ for _, listener := range p.listeners {
+ go listener.run(stopCh)
+ go listener.pop(stopCh)
+ }
+}
+
+type processorListener struct {
+ // lock/cond protects access to 'pendingNotifications'.
+ lock sync.RWMutex
+ cond sync.Cond
+
+ // pendingNotifications is an unbounded slice that holds all notifications not yet distributed
+ // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
+ // added until we OOM.
+ // TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
+ // we should try to do something better
+ pendingNotifications []interface{}
+
+ nextCh chan interface{}
+
+ handler ResourceEventHandler
+}
+
+func newProcessListener(handler ResourceEventHandler) *processorListener {
+ ret := &processorListener{
+ pendingNotifications: []interface{}{},
+ nextCh: make(chan interface{}),
+ handler: handler,
+ }
+
+ ret.cond.L = &ret.lock
+ return ret
+}
+
+func (p *processorListener) add(notification interface{}) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.pendingNotifications = append(p.pendingNotifications, notification)
+ p.cond.Broadcast()
+}
+
+func (p *processorListener) pop(stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrash()
+
+ for {
+ blockingGet := func() (interface{}, bool) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ for len(p.pendingNotifications) == 0 {
+ // check if we're shutdown
+ select {
+ case <-stopCh:
+ return nil, true
+ default:
+ }
+ p.cond.Wait()
+ }
+
+ nt := p.pendingNotifications[0]
+ p.pendingNotifications = p.pendingNotifications[1:]
+ return nt, false
+ }
+
+ notification, stopped := blockingGet()
+ if stopped {
+ return
+ }
+
+ select {
+ case <-stopCh:
+ return
+ case p.nextCh <- notification:
+ }
+ }
+}
+
+func (p *processorListener) run(stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrash()
+
+ for {
+ var next interface{}
+ select {
+ case <-stopCh:
+ func() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ p.cond.Broadcast()
+ }()
+ return
+ case next = <-p.nextCh:
+ }
+
+ switch notification := next.(type) {
+ case updateNotification:
+ p.handler.OnUpdate(notification.oldObj, notification.newObj)
+ case addNotification:
+ p.handler.OnAdd(notification.newObj)
+ case deleteNotification:
+ p.handler.OnDelete(notification.oldObj)
+ default:
+ utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
+ }
+ }
+}
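
A brief sketch of two consumers sharing one informer built with the constructors above; the ListerWatcher, the api.Pod object type and the fmt/time imports are assumed to be provided by the caller and the surrounding vendored packages.

// exampleSharedInformer registers two independent handlers on one shared
// informer and starts it. Editorial sketch; lw and stopCh are supplied by the caller.
func exampleSharedInformer(lw cache.ListerWatcher, stopCh <-chan struct{}) error {
	informer := NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, cache.Indexers{})

	if err := informer.AddEventHandler(ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Printf("handler A add: %T\n", obj) },
	}); err != nil {
		return err
	}
	if err := informer.AddEventHandler(ResourceEventHandlerFuncs{
		DeleteFunc: func(obj interface{}) { fmt.Printf("handler B delete: %T\n", obj) },
	}); err != nil {
		return err
	}

	go informer.Run(stopCh)
	// Both handlers now see the same stream of notifications backed by one indexer.
	return nil
}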
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS
new file mode 100644
index 0000000..a046efc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS
@@ -0,0 +1,5 @@
+assignees:
+ - derekwaynecarr
+ - lavalamp
+ - smarterclayton
+ - wojtek-t
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go
new file mode 100644
index 0000000..e4e74f6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Cloner knows how to copy one type to another.
+type Cloner struct {
+ // Map from the type to a function which can do the deep copy.
+ deepCopyFuncs map[reflect.Type]reflect.Value
+ generatedDeepCopyFuncs map[reflect.Type]reflect.Value
+}
+
+// NewCloner creates a new Cloner object.
+func NewCloner() *Cloner {
+ c := &Cloner{
+ deepCopyFuncs: map[reflect.Type]reflect.Value{},
+ generatedDeepCopyFuncs: map[reflect.Type]reflect.Value{},
+ }
+ if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil {
+ // If one of the deep-copy functions is malformed, detect it immediately.
+ panic(err)
+ }
+ return c
+}
+
+// Prevent recursing into every byte...
+func byteSliceDeepCopy(in []byte, out *[]byte, c *Cloner) error {
+ if in != nil {
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ *out = nil
+ }
+ return nil
+}
+
+// Verifies whether a deep-copy function has a correct signature.
+func verifyDeepCopyFunctionSignature(ft reflect.Type) error {
+ if ft.Kind() != reflect.Func {
+ return fmt.Errorf("expected func, got: %v", ft)
+ }
+ if ft.NumIn() != 3 {
+ return fmt.Errorf("expected three 'in' params, got %v", ft)
+ }
+ if ft.NumOut() != 1 {
+ return fmt.Errorf("expected one 'out' param, got %v", ft)
+ }
+ if ft.In(1).Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft)
+ }
+ if ft.In(1).Elem() != ft.In(0) {
+ return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft)
+ }
+ var forClonerType Cloner
+ if expected := reflect.TypeOf(&forClonerType); ft.In(2) != expected {
+ return fmt.Errorf("expected '%v' arg for 'in' param 2, got: '%v'", expected, ft.In(2))
+ }
+ var forErrorType error
+ // This convolution is necessary, otherwise TypeOf picks up on the fact
+ // that forErrorType is nil
+ errorType := reflect.TypeOf(&forErrorType).Elem()
+ if ft.Out(0) != errorType {
+ return fmt.Errorf("expected error return, got: %v", ft)
+ }
+ return nil
+}
+
+// RegisterDeepCopyFunc registers a copying func with the Cloner.
+// deepCopyFunc must take three parameters: a type input, a pointer to a
+// type output, and a pointer to Cloner. It should return an error.
+//
+// Example:
+// c.RegisterDeepCopyFunc(
+// func(in Pod, out *Pod, c *Cloner) error {
+// // deep copy logic...
+// return nil
+// })
+func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error {
+ fv := reflect.ValueOf(deepCopyFunc)
+ ft := fv.Type()
+ if err := verifyDeepCopyFunctionSignature(ft); err != nil {
+ return err
+ }
+ c.deepCopyFuncs[ft.In(0)] = fv
+ return nil
+}
+
+// RegisterGeneratedDeepCopyFunc is similar to RegisterDeepCopyFunc, but registers deep-copy
+// functions that were automatically generated.
+func (c *Cloner) RegisterGeneratedDeepCopyFunc(deepCopyFunc interface{}) error {
+ fv := reflect.ValueOf(deepCopyFunc)
+ ft := fv.Type()
+ if err := verifyDeepCopyFunctionSignature(ft); err != nil {
+ return err
+ }
+ c.generatedDeepCopyFuncs[ft.In(0)] = fv
+ return nil
+}
+
+// DeepCopy will perform a deep copy of a given object.
+func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) {
+ // Can be invalid if we run DeepCopy(X) where X is a nil interface type.
+ // For example, we get an invalid value when someone tries to deep-copy
+ // a nil labels.Selector.
+ // This does not occur if X is nil and is a pointer to a concrete type.
+ if in == nil {
+ return nil, nil
+ }
+ inValue := reflect.ValueOf(in)
+ outValue, err := c.deepCopy(inValue)
+ if err != nil {
+ return nil, err
+ }
+ return outValue.Interface(), nil
+}
+
+func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) {
+ inType := src.Type()
+
+ if fv, ok := c.deepCopyFuncs[inType]; ok {
+ return c.customDeepCopy(src, fv)
+ }
+ if fv, ok := c.generatedDeepCopyFuncs[inType]; ok {
+ return c.customDeepCopy(src, fv)
+ }
+ return c.defaultDeepCopy(src)
+}
+
+func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) {
+ outValue := reflect.New(src.Type())
+ args := []reflect.Value{src, outValue, reflect.ValueOf(c)}
+ result := fv.Call(args)[0].Interface()
+ // This convolution is necessary because nil interfaces won't convert
+ // to error.
+ if result == nil {
+ return outValue.Elem(), nil
+ }
+ return outValue.Elem(), result.(error)
+}
+
+func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) {
+ switch src.Kind() {
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr:
+ return src, fmt.Errorf("cannot deep copy kind: %s", src.Kind())
+ case reflect.Array:
+ dst := reflect.New(src.Type())
+ for i := 0; i < src.Len(); i++ {
+ copyVal, err := c.deepCopy(src.Index(i))
+ if err != nil {
+ return src, err
+ }
+ dst.Elem().Index(i).Set(copyVal)
+ }
+ return dst.Elem(), nil
+ case reflect.Interface:
+ if src.IsNil() {
+ return src, nil
+ }
+ return c.deepCopy(src.Elem())
+ case reflect.Map:
+ if src.IsNil() {
+ return src, nil
+ }
+ dst := reflect.MakeMap(src.Type())
+ for _, k := range src.MapKeys() {
+ copyVal, err := c.deepCopy(src.MapIndex(k))
+ if err != nil {
+ return src, err
+ }
+ dst.SetMapIndex(k, copyVal)
+ }
+ return dst, nil
+ case reflect.Ptr:
+ if src.IsNil() {
+ return src, nil
+ }
+ dst := reflect.New(src.Type().Elem())
+ copyVal, err := c.deepCopy(src.Elem())
+ if err != nil {
+ return src, err
+ }
+ dst.Elem().Set(copyVal)
+ return dst, nil
+ case reflect.Slice:
+ if src.IsNil() {
+ return src, nil
+ }
+ dst := reflect.MakeSlice(src.Type(), 0, src.Len())
+ for i := 0; i < src.Len(); i++ {
+ copyVal, err := c.deepCopy(src.Index(i))
+ if err != nil {
+ return src, err
+ }
+ dst = reflect.Append(dst, copyVal)
+ }
+ return dst, nil
+ case reflect.Struct:
+ dst := reflect.New(src.Type())
+ for i := 0; i < src.NumField(); i++ {
+ if !dst.Elem().Field(i).CanSet() {
+ // Can't set private fields. At this point, the
+ // best we can do is a shallow copy. For
+ // example, time.Time is a value type with
+ // private members that can be shallow copied.
+ return src, nil
+ }
+ copyVal, err := c.deepCopy(src.Field(i))
+ if err != nil {
+ return src, err
+ }
+ dst.Elem().Field(i).Set(copyVal)
+ }
+ return dst.Elem(), nil
+
+ default:
+ // Value types like numbers, booleans, and strings.
+ return src, nil
+ }
+}
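
A small sketch exercising the Cloner above; the widget type is purely illustrative, and the registered func only demonstrates the signature that verifyDeepCopyFunctionSignature expects.

// widget is a hypothetical type used only for this example.
type widget struct {
	Name   string
	Labels map[string]string
}

// exampleClone deep-copies a widget via a registered typed func; without the
// registration the reflective defaultDeepCopy path above would handle it instead.
func exampleClone() (widget, error) {
	c := NewCloner()

	// Optional typed fast path; the signature must be func(in T, out *T, c *Cloner) error.
	if err := c.RegisterDeepCopyFunc(func(in widget, out *widget, cl *Cloner) error {
		out.Name = in.Name
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
		return nil
	}); err != nil {
		return widget{}, err
	}

	original := widget{Name: "a", Labels: map[string]string{"k": "v"}}
	copied, err := c.DeepCopy(original)
	if err != nil {
		return widget{}, err
	}
	return copied.(widget), nil
}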
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/converter.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/converter.go
new file mode 100644
index 0000000..7a18d63
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/converter.go
@@ -0,0 +1,951 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type typePair struct {
+ source reflect.Type
+ dest reflect.Type
+}
+
+type typeNamePair struct {
+ fieldType reflect.Type
+ fieldName string
+}
+
+// DebugLogger allows you to get debugging messages if necessary.
+type DebugLogger interface {
+ Logf(format string, args ...interface{})
+}
+
+type NameFunc func(t reflect.Type) string
+
+var DefaultNameFunc = func(t reflect.Type) string { return t.Name() }
+
+type GenericConversionFunc func(a, b interface{}, scope Scope) (bool, error)
+
+// Converter knows how to convert one type to another.
+type Converter struct {
+ // Map from the conversion pair to a function which can
+ // do the conversion.
+ conversionFuncs ConversionFuncs
+ generatedConversionFuncs ConversionFuncs
+
+ // genericConversions are called during normal conversion to offer a "fast-path"
+ // that avoids all reflection. These methods are not called outside of the .Convert()
+ // method.
+ genericConversions []GenericConversionFunc
+
+ // Set of conversions that should be treated as a no-op
+ ignoredConversions map[typePair]struct{}
+
+ // This is a map from a source field type and name, to a list of destination
+ // field type and name.
+ structFieldDests map[typeNamePair][]typeNamePair
+
+ // Allows for the opposite lookup of structFieldDests. So that SourceFromDest
+ // copy flag also works. So this is a map of destination field name, to potential
+ // source field name and type to look for.
+ structFieldSources map[typeNamePair][]typeNamePair
+
+ // Map from a type to a function which applies defaults.
+ defaultingFuncs map[reflect.Type]reflect.Value
+
+ // Similar to above, but function is stored as interface{}.
+ defaultingInterfaces map[reflect.Type]interface{}
+
+ // Map from an input type to a function which can apply a key name mapping
+ inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc
+
+ // Map from an input type to a set of default conversion flags.
+ inputDefaultFlags map[reflect.Type]FieldMatchingFlags
+
+ // If non-nil, will be called to print helpful debugging info. Quite verbose.
+ Debug DebugLogger
+
+ // nameFunc is called to retrieve the name of a type; this name is used for the
+ // purpose of deciding whether two types match or not (i.e., will we attempt to
+ // do a conversion). The default returns the go type name.
+ nameFunc func(t reflect.Type) string
+}
+
+// NewConverter creates a new Converter object.
+func NewConverter(nameFn NameFunc) *Converter {
+ c := &Converter{
+ conversionFuncs: NewConversionFuncs(),
+ generatedConversionFuncs: NewConversionFuncs(),
+ ignoredConversions: make(map[typePair]struct{}),
+ defaultingFuncs: make(map[reflect.Type]reflect.Value),
+ defaultingInterfaces: make(map[reflect.Type]interface{}),
+ nameFunc: nameFn,
+ structFieldDests: make(map[typeNamePair][]typeNamePair),
+ structFieldSources: make(map[typeNamePair][]typeNamePair),
+
+ inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc),
+ inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags),
+ }
+ c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte)
+ return c
+}
+
+// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern
+// (for two conversion types) to the converter. These functions are checked first during
+// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering
+// typed conversions.
+func (c *Converter) AddGenericConversionFunc(fn GenericConversionFunc) {
+ c.genericConversions = append(c.genericConversions, fn)
+}
+
+// WithConversions returns a Converter that is a copy of c but with the additional
+// fns merged on top.
+func (c *Converter) WithConversions(fns ConversionFuncs) *Converter {
+ copied := *c
+ copied.conversionFuncs = c.conversionFuncs.Merge(fns)
+ return &copied
+}
+
+// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type.
+func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) {
+ return c.inputDefaultFlags[t], &Meta{
+ KeyNameMapping: c.inputFieldMappingFuncs[t],
+ }
+}
+
+// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte
+func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error {
+ if *in == nil {
+ *out = nil
+ return nil
+ }
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ return nil
+}
+
+// Scope is passed to conversion funcs to allow them to continue an ongoing conversion.
+// If multiple converters exist in the system, Scope will allow you to use the correct one
+// from a conversion function--that is, the one your conversion function was called by.
+type Scope interface {
+ // Call Convert to convert sub-objects. Note that if you call it with your own exact
+ // parameters, you'll run out of stack space before anything useful happens.
+ Convert(src, dest interface{}, flags FieldMatchingFlags) error
+
+ // DefaultConvert performs the default conversion, without calling a conversion func
+ // on the current stack frame. This makes it safe to call from a conversion func.
+ DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error
+
+ // If registered, returns a function applying defaults for objects of a given type.
+ // Used for automatically generating conversion functions.
+ DefaultingInterface(inType reflect.Type) (interface{}, bool)
+
+ // SrcTag and DestTag contain the struct tags that src and dest had, respectively.
+ // If the enclosing object was not a struct, then these will contain no tags, of course.
+ SrcTag() reflect.StructTag
+ DestTag() reflect.StructTag
+
+ // Flags returns the flags with which the conversion was started.
+ Flags() FieldMatchingFlags
+
+ // Meta returns any information originally passed to Convert.
+ Meta() *Meta
+}
+
+// FieldMappingFunc can convert an input field value into different values, depending on
+// the value of the source or destination struct tags.
+type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string)
+
+func NewConversionFuncs() ConversionFuncs {
+ return ConversionFuncs{fns: make(map[typePair]reflect.Value)}
+}
+
+type ConversionFuncs struct {
+ fns map[typePair]reflect.Value
+}
+
+// Add adds the provided conversion functions to the lookup table - they must have the signature
+// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override
+// previously registered pairs.
+func (c ConversionFuncs) Add(fns ...interface{}) error {
+ for _, fn := range fns {
+ fv := reflect.ValueOf(fn)
+ ft := fv.Type()
+ if err := verifyConversionFunctionSignature(ft); err != nil {
+ return err
+ }
+ c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv
+ }
+ return nil
+}
+
+// Merge returns a new ConversionFuncs that contains all conversions from
+// both other and c, with other conversions taking precedence.
+func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
+ merged := NewConversionFuncs()
+ for k, v := range c.fns {
+ merged.fns[k] = v
+ }
+ for k, v := range other.fns {
+ merged.fns[k] = v
+ }
+ return merged
+}
+
+// Meta is supplied by Scheme, when it calls Convert.
+type Meta struct {
+ // KeyNameMapping is an optional function which may map the listed key (field name)
+ // into a source and destination value.
+ KeyNameMapping FieldMappingFunc
+}
+
+// scope contains information about an ongoing conversion.
+type scope struct {
+ converter *Converter
+ meta *Meta
+ flags FieldMatchingFlags
+
+ // srcStack & destStack are separate because they may not have a 1:1
+ // relationship.
+ srcStack scopeStack
+ destStack scopeStack
+}
+
+type scopeStackElem struct {
+ tag reflect.StructTag
+ value reflect.Value
+ key string
+}
+
+type scopeStack []scopeStackElem
+
+func (s *scopeStack) pop() {
+ n := len(*s)
+ *s = (*s)[:n-1]
+}
+
+func (s *scopeStack) push(e scopeStackElem) {
+ *s = append(*s, e)
+}
+
+func (s *scopeStack) top() *scopeStackElem {
+ return &(*s)[len(*s)-1]
+}
+
+func (s scopeStack) describe() string {
+ desc := ""
+ if len(s) > 1 {
+ desc = "(" + s[1].value.Type().String() + ")"
+ }
+ for i, v := range s {
+ if i < 2 {
+ // First layer on stack is not real; second is handled specially above.
+ continue
+ }
+ if v.key == "" {
+ desc += fmt.Sprintf(".%v", v.value.Type())
+ } else {
+ desc += fmt.Sprintf(".%v", v.key)
+ }
+ }
+ return desc
+}
+
+func (s *scope) DefaultingInterface(inType reflect.Type) (interface{}, bool) {
+ value, found := s.converter.defaultingInterfaces[inType]
+ return value, found
+}
+
+// Formats src & dest as indices for printing.
+func (s *scope) setIndices(src, dest int) {
+ s.srcStack.top().key = fmt.Sprintf("[%v]", src)
+ s.destStack.top().key = fmt.Sprintf("[%v]", dest)
+}
+
+// Formats src & dest as map keys for printing.
+func (s *scope) setKeys(src, dest interface{}) {
+ s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src)
+ s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest)
+}
+
+// Convert continues a conversion.
+func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error {
+ return s.converter.Convert(src, dest, flags, s.meta)
+}
+
+// DefaultConvert continues a conversion, performing a default conversion (no conversion func)
+// for the current stack frame.
+func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error {
+ return s.converter.DefaultConvert(src, dest, flags, s.meta)
+}
+
+// SrcTag returns the tag of the struct containing the current source item, if any.
+func (s *scope) SrcTag() reflect.StructTag {
+ return s.srcStack.top().tag
+}
+
+// DestTag returns the tag of the struct containing the current dest item, if any.
+func (s *scope) DestTag() reflect.StructTag {
+ return s.destStack.top().tag
+}
+
+// Flags returns the flags with which the current conversion was started.
+func (s *scope) Flags() FieldMatchingFlags {
+ return s.flags
+}
+
+// Meta returns the meta object that was originally passed to Convert.
+func (s *scope) Meta() *Meta {
+ return s.meta
+}
+
+// describe prints the path to get to the current (source, dest) values.
+func (s *scope) describe() (src, dest string) {
+ return s.srcStack.describe(), s.destStack.describe()
+}
+
+// errorf makes an error that includes information about where we were in the objects
+// we were asked to convert.
+func (s *scope) errorf(message string, args ...interface{}) error {
+ srcPath, destPath := s.describe()
+ where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath)
+ return fmt.Errorf(where+message, args...)
+}
+
+// Verifies whether a conversion function has a correct signature.
+func verifyConversionFunctionSignature(ft reflect.Type) error {
+ if ft.Kind() != reflect.Func {
+ return fmt.Errorf("expected func, got: %v", ft)
+ }
+ if ft.NumIn() != 3 {
+ return fmt.Errorf("expected three 'in' params, got: %v", ft)
+ }
+ if ft.NumOut() != 1 {
+ return fmt.Errorf("expected one 'out' param, got: %v", ft)
+ }
+ if ft.In(0).Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft)
+ }
+ if ft.In(1).Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft)
+ }
+ scopeType := Scope(nil)
+ if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a {
+ return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft)
+ }
+ var forErrorType error
+ // This convolution is necessary, otherwise TypeOf picks up on the fact
+ // that forErrorType is nil.
+ errorType := reflect.TypeOf(&forErrorType).Elem()
+ if ft.Out(0) != errorType {
+ return fmt.Errorf("expected error return, got: %v", ft)
+ }
+ return nil
+}
+
+// RegisterConversionFunc registers a conversion func with the
+// Converter. conversionFunc must take three parameters: a pointer to the input
+// type, a pointer to the output type, and a conversion.Scope (which should be
+// used if recursive conversion calls are desired). It must return an error.
+//
+// Example:
+// c.RegisterConversionFunc(
+// func(in *Pod, out *v1.Pod, s Scope) error {
+// // conversion logic...
+// return nil
+// })
+func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error {
+ return c.conversionFuncs.Add(conversionFunc)
+}
+
+// RegisterGeneratedConversionFunc is similar to RegisterConversionFunc, but registers
+// conversion functions that were automatically generated.
+func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error {
+ return c.generatedConversionFuncs.Add(conversionFunc)
+}
+
+// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested
+// conversion between from and to is ignored.
+func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
+ typeFrom := reflect.TypeOf(from)
+ typeTo := reflect.TypeOf(to)
+ if reflect.TypeOf(from).Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom)
+ }
+ if typeTo.Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo)
+ }
+ c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{}
+ return nil
+}
+
+// IsConversionIgnored returns true if the specified objects should be dropped during
+// conversion.
+func (c *Converter) IsConversionIgnored(inType, outType reflect.Type) bool {
+ _, found := c.ignoredConversions[typePair{inType, outType}]
+ return found
+}
+
+func (c *Converter) HasConversionFunc(inType, outType reflect.Type) bool {
+ _, found := c.conversionFuncs.fns[typePair{inType, outType}]
+ return found
+}
+
+func (c *Converter) ConversionFuncValue(inType, outType reflect.Type) (reflect.Value, bool) {
+ value, found := c.conversionFuncs.fns[typePair{inType, outType}]
+ return value, found
+}
+
+// SetStructFieldCopy registers a correspondence. Whenever a struct field is encountered
+// which has a type and name matching srcFieldType and srcFieldName, it will be copied
+// into the field in the destination struct matching destFieldType & destFieldName, if
+// such a field exists.
+// May be called multiple times, even for the same source field & type; all applicable
+// copies will be performed.
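+//
+// Illustrative example (the field names shown are hypothetical): copy any string
+// field named "PodIP" in the source into a string field named "HostIP" in the
+// destination:
+// c.SetStructFieldCopy("", "PodIP", "", "HostIP")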
+func (c *Converter) SetStructFieldCopy(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error {
+ st := reflect.TypeOf(srcFieldType)
+ dt := reflect.TypeOf(destFieldType)
+ srcKey := typeNamePair{st, srcFieldName}
+ destKey := typeNamePair{dt, destFieldName}
+ c.structFieldDests[srcKey] = append(c.structFieldDests[srcKey], destKey)
+ c.structFieldSources[destKey] = append(c.structFieldSources[destKey], srcKey)
+ return nil
+}
+
+// RegisterDefaultingFunc registers a value-defaulting func with the Converter.
+// defaultingFunc must take one parameter: a pointer to the input type.
+//
+// Example:
+// c.RegisterDefaultingFunc(
+// func(in *v1.Pod) {
+// // defaulting logic...
+// })
+func (c *Converter) RegisterDefaultingFunc(defaultingFunc interface{}) error {
+ fv := reflect.ValueOf(defaultingFunc)
+ ft := fv.Type()
+ if ft.Kind() != reflect.Func {
+ return fmt.Errorf("expected func, got: %v", ft)
+ }
+ if ft.NumIn() != 1 {
+ return fmt.Errorf("expected one 'in' param, got: %v", ft)
+ }
+ if ft.NumOut() != 0 {
+ return fmt.Errorf("expected zero 'out' params, got: %v", ft)
+ }
+ if ft.In(0).Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft)
+ }
+ inType := ft.In(0).Elem()
+ c.defaultingFuncs[inType] = fv
+ c.defaultingInterfaces[inType] = defaultingFunc
+ return nil
+}
+
+// RegisterInputDefaults registers a field name mapping function, used when converting
+// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping
+// applied automatically if the input matches in. A set of default flags for the input conversion
+// may also be provided, which will be used when no explicit flags are requested.
+func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error {
+ fv := reflect.ValueOf(in)
+ ft := fv.Type()
+ if ft.Kind() != reflect.Ptr {
+ return fmt.Errorf("expected pointer 'in' argument, got: %v", ft)
+ }
+ c.inputFieldMappingFuncs[ft] = fn
+ c.inputDefaultFlags[ft] = defaultFlags
+ return nil
+}
+
+// FieldMatchingFlags contains a list of ways in which struct fields could be
+// copied. These constants may be | combined.
+type FieldMatchingFlags int
+
+const (
+ // Loop through destination fields, search for matching source
+ // field to copy it from. Source fields with no corresponding
+ // destination field will be ignored. If SourceToDest is
+ // specified, this flag is ignored. If neither is specified,
+ // or no flags are passed, this flag is the default.
+ DestFromSource FieldMatchingFlags = 0
+ // Loop through source fields, search for matching dest field
+ // to copy it into. Destination fields with no corresponding
+ // source field will be ignored.
+ SourceToDest FieldMatchingFlags = 1 << iota
+ // Don't treat it as an error if the corresponding source or
+ // dest field can't be found.
+ IgnoreMissingFields
+ // Don't require type names to match.
+ AllowDifferentFieldTypeNames
+)
+
+// IsSet returns true if the given flag or combination of flags is set.
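+// For example, (SourceToDest | IgnoreMissingFields).IsSet(IgnoreMissingFields) is
+// true, while FieldMatchingFlags(0).IsSet(SourceToDest) is false.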
+func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool {
+ if flag == DestFromSource {
+ // The bit logic doesn't work on the default value.
+ return f&SourceToDest != SourceToDest
+ }
+ return f&flag == flag
+}
+
+// Convert will translate src to dest if it knows how. Both must be pointers.
+// If no conversion func is registered and the default copying mechanism
+// doesn't work on this type pair, an error will be returned.
+// Read the comments on the various FieldMatchingFlags constants to understand
+// what the 'flags' parameter does.
+// 'meta' is given to allow you to pass information to conversion functions,
+// it is not used by Convert() other than storing it in the scope.
+// Not safe for objects with cyclic references!
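+//
+// Minimal sketch (src and dest are hypothetical pointers to structs of the same
+// shape; a nil Meta is passed since no extra context is needed):
+// err := c.Convert(&src, &dest, AllowDifferentFieldTypeNames, nil)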
+func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
+ if len(c.genericConversions) > 0 {
+ // TODO: avoid scope allocation
+ s := &scope{converter: c, flags: flags, meta: meta}
+ for _, fn := range c.genericConversions {
+ if ok, err := fn(src, dest, s); ok {
+ return err
+ }
+ }
+ }
+ return c.doConversion(src, dest, flags, meta, c.convert)
+}
+
+// DefaultConvert will translate src to dest if it knows how. Both must be pointers.
+// No conversion func is used. If the default copying mechanism
+// doesn't work on this type pair, an error will be returned.
+// Read the comments on the various FieldMatchingFlags constants to understand
+// what the 'flags' parameter does.
+// 'meta' is given to allow you to pass information to conversion functions,
+// it is not used by DefaultConvert() other than storing it in the scope.
+// Not safe for objects with cyclic references!
+func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
+ return c.doConversion(src, dest, flags, meta, c.defaultConvert)
+}
+
+type conversionFunc func(sv, dv reflect.Value, scope *scope) error
+
+func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error {
+ dv, err := EnforcePtr(dest)
+ if err != nil {
+ return err
+ }
+ if !dv.CanAddr() && !dv.CanSet() {
+ return fmt.Errorf("can't write to dest")
+ }
+ sv, err := EnforcePtr(src)
+ if err != nil {
+ return err
+ }
+ s := &scope{
+ converter: c,
+ flags: flags,
+ meta: meta,
+ }
+ // Leave something on the stack, so that calls to struct tag getters never fail.
+ s.srcStack.push(scopeStackElem{})
+ s.destStack.push(scopeStackElem{})
+ return f(sv, dv, s)
+}
+
+// callCustom calls 'custom' with sv & dv. custom must be a conversion function.
+func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error {
+ if !sv.CanAddr() {
+ sv2 := reflect.New(sv.Type())
+ sv2.Elem().Set(sv)
+ sv = sv2
+ } else {
+ sv = sv.Addr()
+ }
+ if !dv.CanAddr() {
+ if !dv.CanSet() {
+ return scope.errorf("can't addr or set dest.")
+ }
+ dvOrig := dv
+ dv := reflect.New(dvOrig.Type())
+ defer func() { dvOrig.Set(dv) }()
+ } else {
+ dv = dv.Addr()
+ }
+ args := []reflect.Value{sv, dv, reflect.ValueOf(scope)}
+ ret := custom.Call(args)[0].Interface()
+ // This convolution is necessary because nil interfaces won't convert
+ // to errors.
+ if ret == nil {
+ return nil
+ }
+ return ret.(error)
+}
+
+// convert recursively copies sv into dv, calling an appropriate conversion function if
+// one is registered.
+func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error {
+ dt, st := dv.Type(), sv.Type()
+ // Apply default values.
+ if fv, ok := c.defaultingFuncs[st]; ok {
+ if c.Debug != nil {
+ c.Debug.Logf("Applying defaults for '%v'", st)
+ }
+ args := []reflect.Value{sv.Addr()}
+ fv.Call(args)
+ }
+
+ pair := typePair{st, dt}
+
+ // ignore conversions of this type
+ if _, ok := c.ignoredConversions[pair]; ok {
+ if c.Debug != nil {
+ c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt)
+ }
+ return nil
+ }
+
+ // Convert sv to dv.
+ if fv, ok := c.conversionFuncs.fns[pair]; ok {
+ if c.Debug != nil {
+ c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt)
+ }
+ return c.callCustom(sv, dv, fv, scope)
+ }
+ if fv, ok := c.generatedConversionFuncs.fns[pair]; ok {
+ if c.Debug != nil {
+ c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt)
+ }
+ return c.callCustom(sv, dv, fv, scope)
+ }
+
+ return c.defaultConvert(sv, dv, scope)
+}
+
+// defaultConvert recursively copies sv into dv. No conversion function is called
+// for the current stack frame (but conversion functions may be called for nested objects).
+func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
+ dt, st := dv.Type(), sv.Type()
+
+ if !dv.CanSet() {
+ return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)")
+ }
+
+ if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) {
+ return scope.errorf(
+ "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.",
+ c.nameFunc(st), c.nameFunc(dt), st, dt)
+ }
+
+ switch st.Kind() {
+ case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct:
+ // Don't copy these via assignment/conversion!
+ default:
+ // This should handle all simple types.
+ if st.AssignableTo(dt) {
+ dv.Set(sv)
+ return nil
+ }
+ if st.ConvertibleTo(dt) {
+ dv.Set(sv.Convert(dt))
+ return nil
+ }
+ }
+
+ if c.Debug != nil {
+ c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt)
+ }
+
+ scope.srcStack.push(scopeStackElem{value: sv})
+ scope.destStack.push(scopeStackElem{value: dv})
+ defer scope.srcStack.pop()
+ defer scope.destStack.pop()
+
+ switch dv.Kind() {
+ case reflect.Struct:
+ return c.convertKV(toKVValue(sv), toKVValue(dv), scope)
+ case reflect.Slice:
+ if sv.IsNil() {
+ // Don't make a zero-length slice.
+ dv.Set(reflect.Zero(dt))
+ return nil
+ }
+ dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))
+ for i := 0; i < sv.Len(); i++ {
+ scope.setIndices(i, i)
+ if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil {
+ return err
+ }
+ }
+ case reflect.Ptr:
+ if sv.IsNil() {
+ // Don't copy a nil ptr!
+ dv.Set(reflect.Zero(dt))
+ return nil
+ }
+ dv.Set(reflect.New(dt.Elem()))
+ switch st.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return c.convert(sv.Elem(), dv.Elem(), scope)
+ default:
+ return c.convert(sv, dv.Elem(), scope)
+ }
+ case reflect.Map:
+ if sv.IsNil() {
+ // Don't copy a nil ptr!
+ dv.Set(reflect.Zero(dt))
+ return nil
+ }
+ dv.Set(reflect.MakeMap(dt))
+ for _, sk := range sv.MapKeys() {
+ dk := reflect.New(dt.Key()).Elem()
+ if err := c.convert(sk, dk, scope); err != nil {
+ return err
+ }
+ dkv := reflect.New(dt.Elem()).Elem()
+ scope.setKeys(sk.Interface(), dk.Interface())
+ // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false,
+ // because a map[string]struct{} does not allow a pointer reference.
+ // Calling a custom conversion function defined for the map value
+ // will panic. Example is PodInfo map[string]ContainerStatus.
+ if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil {
+ return err
+ }
+ dv.SetMapIndex(dk, dkv)
+ }
+ case reflect.Interface:
+ if sv.IsNil() {
+ // Don't copy a nil interface!
+ dv.Set(reflect.Zero(dt))
+ return nil
+ }
+ tmpdv := reflect.New(sv.Elem().Type()).Elem()
+ if err := c.convert(sv.Elem(), tmpdv, scope); err != nil {
+ return err
+ }
+ dv.Set(reflect.ValueOf(tmpdv.Interface()))
+ return nil
+ default:
+ return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
+ }
+ return nil
+}
+
+var stringType = reflect.TypeOf("")
+
+func toKVValue(v reflect.Value) kvValue {
+ switch v.Kind() {
+ case reflect.Struct:
+ return structAdaptor(v)
+ case reflect.Map:
+ if v.Type().Key().AssignableTo(stringType) {
+ return stringMapAdaptor(v)
+ }
+ }
+
+ return nil
+}
+
+// kvValue lets us write the same conversion logic to work with both maps
+// and structs. Only maps with string keys make sense for this.
+type kvValue interface {
+ // returns all keys, as a []string.
+ keys() []string
+ // Will just return "" for maps.
+ tagOf(key string) reflect.StructTag
+ // Will return the zero Value if the key doesn't exist.
+ value(key string) reflect.Value
+// Maps require explicit setting; this will do nothing for structs.
+// Returns false on failure.
+ confirmSet(key string, v reflect.Value) bool
+}
+
+type stringMapAdaptor reflect.Value
+
+func (a stringMapAdaptor) len() int {
+ return reflect.Value(a).Len()
+}
+
+func (a stringMapAdaptor) keys() []string {
+ v := reflect.Value(a)
+ keys := make([]string, v.Len())
+ for i, v := range v.MapKeys() {
+ if v.IsNil() {
+ continue
+ }
+ switch t := v.Interface().(type) {
+ case string:
+ keys[i] = t
+ }
+ }
+ return keys
+}
+
+func (a stringMapAdaptor) tagOf(key string) reflect.StructTag {
+ return ""
+}
+
+func (a stringMapAdaptor) value(key string) reflect.Value {
+ return reflect.Value(a).MapIndex(reflect.ValueOf(key))
+}
+
+func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool {
+ return true
+}
+
+type structAdaptor reflect.Value
+
+func (a structAdaptor) len() int {
+ v := reflect.Value(a)
+ return v.Type().NumField()
+}
+
+func (a structAdaptor) keys() []string {
+ v := reflect.Value(a)
+ t := v.Type()
+ keys := make([]string, t.NumField())
+ for i := range keys {
+ keys[i] = t.Field(i).Name
+ }
+ return keys
+}
+
+func (a structAdaptor) tagOf(key string) reflect.StructTag {
+ v := reflect.Value(a)
+ field, ok := v.Type().FieldByName(key)
+ if ok {
+ return field.Tag
+ }
+ return ""
+}
+
+func (a structAdaptor) value(key string) reflect.Value {
+ v := reflect.Value(a)
+ return v.FieldByName(key)
+}
+
+func (a structAdaptor) confirmSet(key string, v reflect.Value) bool {
+ return true
+}
+
+// convertKV can convert things that consist of key/value pairs, like structs
+// and some maps.
+func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error {
+ if skv == nil || dkv == nil {
+ // TODO: add keys to stack to support really understandable error messages.
+ return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv)
+ }
+
+ lister := dkv
+ if scope.flags.IsSet(SourceToDest) {
+ lister = skv
+ }
+
+ var mapping FieldMappingFunc
+ if scope.meta != nil && scope.meta.KeyNameMapping != nil {
+ mapping = scope.meta.KeyNameMapping
+ }
+
+ for _, key := range lister.keys() {
+ if found, err := c.checkField(key, skv, dkv, scope); found {
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ stag := skv.tagOf(key)
+ dtag := dkv.tagOf(key)
+ skey := key
+ dkey := key
+ if mapping != nil {
+ skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag)
+ }
+
+ df := dkv.value(dkey)
+ sf := skv.value(skey)
+ if !df.IsValid() || !sf.IsValid() {
+ switch {
+ case scope.flags.IsSet(IgnoreMissingFields):
+ // No error.
+ case scope.flags.IsSet(SourceToDest):
+ return scope.errorf("%v not present in dest", dkey)
+ default:
+ return scope.errorf("%v not present in src", skey)
+ }
+ continue
+ }
+ scope.srcStack.top().key = skey
+ scope.srcStack.top().tag = stag
+ scope.destStack.top().key = dkey
+ scope.destStack.top().tag = dtag
+ if err := c.convert(sf, df, scope); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// checkField returns true if the field name matches any of the struct
+// field copying rules. The error should be ignored if it returns false.
+func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) {
+ replacementMade := false
+ if scope.flags.IsSet(DestFromSource) {
+ df := dkv.value(fieldName)
+ if !df.IsValid() {
+ return false, nil
+ }
+ destKey := typeNamePair{df.Type(), fieldName}
+ // Check each of the potential source (type, name) pairs to see if they're
+ // present in sv.
+ for _, potentialSourceKey := range c.structFieldSources[destKey] {
+ sf := skv.value(potentialSourceKey.fieldName)
+ if !sf.IsValid() {
+ continue
+ }
+ if sf.Type() == potentialSourceKey.fieldType {
+ // Both the source's name and type matched, so copy.
+ scope.srcStack.top().key = potentialSourceKey.fieldName
+ scope.destStack.top().key = fieldName
+ if err := c.convert(sf, df, scope); err != nil {
+ return true, err
+ }
+ dkv.confirmSet(fieldName, df)
+ replacementMade = true
+ }
+ }
+ return replacementMade, nil
+ }
+
+ sf := skv.value(fieldName)
+ if !sf.IsValid() {
+ return false, nil
+ }
+ srcKey := typeNamePair{sf.Type(), fieldName}
+ // Check each of the potential dest (type, name) pairs to see if they're
+ // present in dv.
+ for _, potentialDestKey := range c.structFieldDests[srcKey] {
+ df := dkv.value(potentialDestKey.fieldName)
+ if !df.IsValid() {
+ continue
+ }
+ if df.Type() == potentialDestKey.fieldType {
+ // Both the dest's name and type matched, so copy.
+ scope.srcStack.top().key = fieldName
+ scope.destStack.top().key = potentialDestKey.fieldName
+ if err := c.convert(sf, df, scope); err != nil {
+ return true, err
+ }
+ dkv.confirmSet(potentialDestKey.fieldName, df)
+ replacementMade = true
+ }
+ }
+ return replacementMade, nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go
new file mode 100644
index 0000000..6bfc870
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+ "k8s.io/kubernetes/third_party/forked/golang/reflect"
+)
+
+// The code for this type must be located in third_party, since it forks from
+// go std lib. But for convenience, we expose the type here, too.
+type Equalities struct {
+ reflect.Equalities
+}
+
+// For convenience, panics on errors
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+ e := Equalities{reflect.Equalities{}}
+ if err := e.AddFuncs(funcs...); err != nil {
+ panic(err)
+ }
+ return e
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/doc.go
new file mode 100644
index 0000000..0c46ef2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package conversion provides go object versioning.
+//
+// Specifically, conversion provides a way for you to define multiple versions
+// of the same object. You may write functions which implement conversion logic,
+// but for the fields which did not change, copying is automated. This makes it
+// easy to modify the structures you use in memory without affecting the format
+// you store on disk or respond to in your external API calls.
+package conversion
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/helper.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/helper.go
new file mode 100644
index 0000000..4ebc1eb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/helper.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value
+// of the dereferenced pointer, ensuring that it is settable/addressable.
+// Returns an error if this is not possible.
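+//
+// For example, given a (hypothetical) struct value pod, EnforcePtr(&pod) returns
+// the settable Value for pod, while EnforcePtr(pod) and EnforcePtr(nil) return errors.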
+func EnforcePtr(obj interface{}) (reflect.Value, error) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ if v.Kind() == reflect.Invalid {
+ return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind")
+ }
+ return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type())
+ }
+ if v.IsNil() {
+ return reflect.Value{}, fmt.Errorf("expected pointer, but got nil")
+ }
+ return v.Elem(), nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go
new file mode 100644
index 0000000..30f717b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package queryparams
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+ "strings"
+)
+
+// Marshaler converts an object to a query parameter string representation
+type Marshaler interface {
+ MarshalQueryParameter() (string, error)
+}
+
+// Unmarshaler converts a string representation to an object
+type Unmarshaler interface {
+ UnmarshalQueryParameter(string) error
+}
+
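+// jsonTag returns the name and omitempty flag parsed from a field's `json` struct tag.
+// For example, a field declared as
+//  Name string `json:"name,omitempty"`
+// yields ("name", true), while a tag of "-" yields ("", false).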
+func jsonTag(field reflect.StructField) (string, bool) {
+ structTag := field.Tag.Get("json")
+ if len(structTag) == 0 {
+ return "", false
+ }
+ parts := strings.Split(structTag, ",")
+ tag := parts[0]
+ if tag == "-" {
+ tag = ""
+ }
+ omitempty := false
+ parts = parts[1:]
+ for _, part := range parts {
+ if part == "omitempty" {
+ omitempty = true
+ break
+ }
+ }
+ return tag, omitempty
+}
+
+func formatValue(value interface{}) string {
+ return fmt.Sprintf("%v", value)
+}
+
+func isPointerKind(kind reflect.Kind) bool {
+ return kind == reflect.Ptr
+}
+
+func isStructKind(kind reflect.Kind) bool {
+ return kind == reflect.Struct
+}
+
+func isValueKind(kind reflect.Kind) bool {
+ switch kind {
+ case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16,
+ reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8,
+ reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32,
+ reflect.Float64, reflect.Complex64, reflect.Complex128:
+ return true
+ default:
+ return false
+ }
+}
+
+func zeroValue(value reflect.Value) bool {
+ return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface())
+}
+
+func customMarshalValue(value reflect.Value) (reflect.Value, bool) {
+ // Return unless we implement a custom query marshaler
+ if !value.CanInterface() {
+ return reflect.Value{}, false
+ }
+
+ marshaler, ok := value.Interface().(Marshaler)
+ if !ok {
+ return reflect.Value{}, false
+ }
+
+ // Don't invoke functions on nil pointers
+ // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response
+ if isPointerKind(value.Kind()) && zeroValue(value) {
+ return reflect.ValueOf(""), true
+ }
+
+ // Get the custom marshalled value
+ v, err := marshaler.MarshalQueryParameter()
+ if err != nil {
+ return reflect.Value{}, false
+ }
+ return reflect.ValueOf(v), true
+}
+
+func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) {
+ if omitempty && zeroValue(value) {
+ return
+ }
+ val := ""
+ iValue := fmt.Sprintf("%v", value.Interface())
+
+ if iValue != "<nil>" {
+ val = iValue
+ }
+ values.Add(tag, val)
+}
+
+func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) {
+ for i := 0; i < list.Len(); i++ {
+ addParam(values, tag, omitempty, list.Index(i))
+ }
+}
+
+// Convert takes an object and converts it to a url.Values object using JSON tags as
+// parameter names. Only top-level simple values, arrays, and slices are serialized.
+// Embedded structs, maps, etc. will not be serialized.
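+//
+// Illustrative sketch (ListOptions is a hypothetical type):
+//  type ListOptions struct {
+//      LabelSelector string `json:"labelSelector,omitempty"`
+//      Limit         int64  `json:"limit,omitempty"`
+//  }
+//  v, _ := Convert(&ListOptions{LabelSelector: "app=db", Limit: 10})
+//  // v.Encode() == "labelSelector=app%3Ddb&limit=10"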
+func Convert(obj interface{}) (url.Values, error) {
+ result := url.Values{}
+ if obj == nil {
+ return result, nil
+ }
+ var sv reflect.Value
+ switch reflect.TypeOf(obj).Kind() {
+ case reflect.Ptr, reflect.Interface:
+ sv = reflect.ValueOf(obj).Elem()
+ default:
+ return nil, fmt.Errorf("expecting a pointer or interface")
+ }
+ st := sv.Type()
+ if !isStructKind(st.Kind()) {
+ return nil, fmt.Errorf("expecting a pointer to a struct")
+ }
+
+ // Check all object fields
+ convertStruct(result, st, sv)
+
+ return result, nil
+}
+
+func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) {
+ for i := 0; i < st.NumField(); i++ {
+ field := sv.Field(i)
+ tag, omitempty := jsonTag(st.Field(i))
+ if len(tag) == 0 {
+ continue
+ }
+ ft := field.Type()
+
+ kind := ft.Kind()
+ if isPointerKind(kind) {
+ ft = ft.Elem()
+ kind = ft.Kind()
+ if !field.IsNil() {
+ field = reflect.Indirect(field)
+ }
+ }
+
+ switch {
+ case isValueKind(kind):
+ addParam(result, tag, omitempty, field)
+ case kind == reflect.Array || kind == reflect.Slice:
+ if isValueKind(ft.Elem().Kind()) {
+ addListOfParams(result, tag, omitempty, field)
+ }
+ case isStructKind(kind) && !(zeroValue(field) && omitempty):
+ if marshalValue, ok := customMarshalValue(field); ok {
+ addParam(result, tag, omitempty, marshalValue)
+ } else {
+ convertStruct(result, ft, field)
+ }
+ }
+ }
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go
new file mode 100644
index 0000000..4c1002a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package queryparams provides conversion from versioned
+// runtime objects to URL query values.
+package queryparams
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/doc.go
new file mode 100644
index 0000000..49059e2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fields implements a simple field system, parsing and matching
+// selectors with sets of fields.
+package fields
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/fields.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/fields.go
new file mode 100644
index 0000000..623b27e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/fields.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import (
+ "sort"
+ "strings"
+)
+
+// Fields allows you to present fields independently from their storage.
+type Fields interface {
+ // Has returns whether the provided field exists.
+ Has(field string) (exists bool)
+
+ // Get returns the value for the provided field.
+ Get(field string) (value string)
+}
+
+// Set is a map of field:value. It implements Fields.
+type Set map[string]string
+
+// String returns all fields listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
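+// For example, Set{"metadata.name": "foo", "status.phase": "Running"}.String()
+// returns "metadata.name=foo,status.phase=Running".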
+func (ls Set) String() string {
+ selector := make([]string, 0, len(ls))
+ for key, value := range ls {
+ selector = append(selector, key+"="+value)
+ }
+ // Sort for determinism.
+ sort.StringSlice(selector).Sort()
+ return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided field exists in the map.
+func (ls Set) Has(field string) bool {
+ _, exists := ls[field]
+ return exists
+}
+
+// Get returns the value in the map for the provided field.
+func (ls Set) Get(field string) string {
+ return ls[field]
+}
+
+// AsSelector converts fields into a selector.
+func (ls Set) AsSelector() Selector {
+ return SelectorFromSet(ls)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/selector.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/selector.go
new file mode 100644
index 0000000..eef44d3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/fields/selector.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Selector represents a field selector.
+type Selector interface {
+ // Matches returns true if this selector matches the given set of fields.
+ Matches(Fields) bool
+
+ // Empty returns true if this selector does not restrict the selection space.
+ Empty() bool
+
+ // RequiresExactMatch allows a caller to introspect whether a given selector
+ // requires a single specific field to be set, and if so returns the value it
+ // requires.
+ RequiresExactMatch(field string) (value string, found bool)
+
+ // Transform returns a new copy of the selector after TransformFunc has been
+ // applied to the entire selector, or an error if fn returns an error.
+ Transform(fn TransformFunc) (Selector, error)
+
+ // String returns a human readable string that represents this selector.
+ String() string
+}
+
+// Everything returns a selector that matches all fields.
+func Everything() Selector {
+ return andTerm{}
+}
+
+type hasTerm struct {
+ field, value string
+}
+
+func (t *hasTerm) Matches(ls Fields) bool {
+ return ls.Get(t.field) == t.value
+}
+
+func (t *hasTerm) Empty() bool {
+ return false
+}
+
+func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) {
+ if t.field == field {
+ return t.value, true
+ }
+ return "", false
+}
+
+func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) {
+ field, value, err := fn(t.field, t.value)
+ if err != nil {
+ return nil, err
+ }
+ return &hasTerm{field, value}, nil
+}
+
+func (t *hasTerm) String() string {
+ return fmt.Sprintf("%v=%v", t.field, t.value)
+}
+
+type notHasTerm struct {
+ field, value string
+}
+
+func (t *notHasTerm) Matches(ls Fields) bool {
+ return ls.Get(t.field) != t.value
+}
+
+func (t *notHasTerm) Empty() bool {
+ return false
+}
+
+func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) {
+ return "", false
+}
+
+func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) {
+ field, value, err := fn(t.field, t.value)
+ if err != nil {
+ return nil, err
+ }
+ return &notHasTerm{field, value}, nil
+}
+
+func (t *notHasTerm) String() string {
+ return fmt.Sprintf("%v!=%v", t.field, t.value)
+}
+
+type andTerm []Selector
+
+func (t andTerm) Matches(ls Fields) bool {
+ for _, q := range t {
+ if !q.Matches(ls) {
+ return false
+ }
+ }
+ return true
+}
+
+func (t andTerm) Empty() bool {
+ if t == nil {
+ return true
+ }
+ if len([]Selector(t)) == 0 {
+ return true
+ }
+ for i := range t {
+ if !t[i].Empty() {
+ return false
+ }
+ }
+ return true
+}
+
+func (t andTerm) RequiresExactMatch(field string) (string, bool) {
+ if t == nil || len([]Selector(t)) == 0 {
+ return "", false
+ }
+ for i := range t {
+ if value, found := t[i].RequiresExactMatch(field); found {
+ return value, found
+ }
+ }
+ return "", false
+}
+
+func (t andTerm) Transform(fn TransformFunc) (Selector, error) {
+ next := make([]Selector, len([]Selector(t)))
+ for i, s := range []Selector(t) {
+ n, err := s.Transform(fn)
+ if err != nil {
+ return nil, err
+ }
+ next[i] = n
+ }
+ return andTerm(next), nil
+}
+
+func (t andTerm) String() string {
+ var terms []string
+ for _, q := range t {
+ terms = append(terms, q.String())
+ }
+ return strings.Join(terms, ",")
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set. A
+// nil Set is considered equivalent to Everything().
+func SelectorFromSet(ls Set) Selector {
+ if ls == nil {
+ return Everything()
+ }
+ items := make([]Selector, 0, len(ls))
+ for field, value := range ls {
+ items = append(items, &hasTerm{field: field, value: value})
+ }
+ if len(items) == 1 {
+ return items[0]
+ }
+ return andTerm(items)
+}
+
+// ParseSelectorOrDie takes a string representing a selector and returns an
+// object suitable for matching, or panics when an error occurs.
+func ParseSelectorOrDie(s string) Selector {
+ selector, err := ParseSelector(s)
+ if err != nil {
+ panic(err)
+ }
+ return selector
+}
+
+// ParseSelector takes a string representing a selector and returns an
+// object suitable for matching, or an error.
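+//
+// For example:
+//  sel, _ := ParseSelector("metadata.name=foo,status.phase!=Failed")
+//  matched := sel.Matches(Set{"metadata.name": "foo", "status.phase": "Running"}) // true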
+func ParseSelector(selector string) (Selector, error) {
+ return parseSelector(selector,
+ func(lhs, rhs string) (newLhs, newRhs string, err error) {
+ return lhs, rhs, nil
+ })
+}
+
+// ParseAndTransformSelector parses the selector and runs it through the given TransformFunc.
+func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) {
+ return parseSelector(selector, fn)
+}
+
+// TransformFunc transforms a selector's field and value.
+type TransformFunc func(field, value string) (newField, newValue string, err error)
+
+func try(selectorPiece, op string) (lhs, rhs string, ok bool) {
+ pieces := strings.Split(selectorPiece, op)
+ if len(pieces) == 2 {
+ return pieces[0], pieces[1], true
+ }
+ return "", "", false
+}
+
+func parseSelector(selector string, fn TransformFunc) (Selector, error) {
+ parts := strings.Split(selector, ",")
+ sort.StringSlice(parts).Sort()
+ var items []Selector
+ for _, part := range parts {
+ if part == "" {
+ continue
+ }
+ if lhs, rhs, ok := try(part, "!="); ok {
+ items = append(items, &notHasTerm{field: lhs, value: rhs})
+ } else if lhs, rhs, ok := try(part, "=="); ok {
+ items = append(items, &hasTerm{field: lhs, value: rhs})
+ } else if lhs, rhs, ok := try(part, "="); ok {
+ items = append(items, &hasTerm{field: lhs, value: rhs})
+ } else {
+ return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+ }
+ }
+ if len(items) == 1 {
+ return items[0].Transform(fn)
+ }
+ return andTerm(items).Transform(fn)
+}
+
+// OneTermEqualSelector returns an object that matches objects where one field equals one value.
+// Cannot return an error.
+func OneTermEqualSelector(k, v string) Selector {
+ return &hasTerm{field: k, value: v}
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go
new file mode 100644
index 0000000..ebc1cc5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package qos contains helper functions for quality of service.
+// For each resource (memory, CPU) Kubelet supports three classes of containers.
+// Memory guaranteed containers will receive the highest priority and will get all the resources
+// they need.
+// Burstable containers will be guaranteed their request and can “burst” and use more resources
+// when available.
+// Best-Effort containers, which don’t specify a request, can use resources only if not being used
+// by other pods.
+package qos
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go
new file mode 100644
index 0000000..ad696f3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package qos
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+)
+
+const (
+ PodInfraOOMAdj int = -999
+ KubeletOOMScoreAdj int = -999
+ KubeProxyOOMScoreAdj int = -999
+ guaranteedOOMScoreAdj int = -998
+ besteffortOOMScoreAdj int = 1000
+)
+
+// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the
+// container should be adjusted.
+// The OOM score of a process is the percentage of memory it consumes
+// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000
+// and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
+// See https://lwn.net/Articles/391222/ for more information.
+func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int {
+ switch GetPodQOS(pod) {
+ case Guaranteed:
+ // Guaranteed containers should be the last to get killed.
+ return guaranteedOOMScoreAdj
+ case BestEffort:
+ return besteffortOOMScoreAdj
+ }
+
+ // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
+ // we want to protect Burstable containers that consume less memory than requested.
+ // The formula below is a heuristic. A container requesting 10% of a system's
+ // memory will have an OOM score adjust of 900. If a process in container Y
+ // uses over 10% of memory, its OOM score will be 1000. The idea is that containers
+ // which use more than their request will have an OOM score of 1000 and will be prime
+ // targets for OOM kills.
+ // Note that this is a heuristic, it won't work if a container has many small processes.
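+ // As a worked example, a burstable container requesting 1GiB on a node with
+ // 10GiB of memory gets oomScoreAdjust = 1000 - (1000*1GiB)/10GiB = 900.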
+ memoryRequest := container.Resources.Requests.Memory().Value()
+ oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
+ // A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure
+ // that burstable pods have a higher OOM score adjustment.
+ if oomScoreAdjust < 2 {
+ return 2
+ }
+ // Give burstable pods a higher chance of survival over besteffort pods.
+ if int(oomScoreAdjust) == besteffortOOMScoreAdj {
+ return int(oomScoreAdjust - 1)
+ }
+ return int(oomScoreAdjust)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go
new file mode 100644
index 0000000..2c0d19d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package qos
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/resource"
+)
+
+// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed.
+func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool {
+ // A container resource is guaranteed if its request == limit.
+ // If request == limit, the user is very confident of resource consumption.
+ req, hasReq := container.Resources.Requests[resource]
+ limit, hasLimit := container.Resources.Limits[resource]
+ if !hasReq || !hasLimit {
+ return false
+ }
+ return req.Cmp(limit) == 0 && req.Value() != 0
+}
+
+// isResourceBestEffort returns true if the container's resource requirements are best-effort.
+func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool {
+ // A container resource is best-effort if its request is unspecified or 0.
+ // If a request is specified, then the user expects some kind of resource guarantee.
+ req, hasReq := container.Resources.Requests[resource]
+ return !hasReq || req.Value() == 0
+}
+
+// GetPodQOS returns the QoS class of a pod.
+// A pod is besteffort if none of its containers have specified any requests or limits.
+// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
+// A pod is burstable if limits and requests do not match across all containers.
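+//
+// For example, a pod whose single container sets requests of cpu=100m,memory=128Mi
+// and identical limits is Guaranteed; keeping the requests but dropping the limits
+// makes it Burstable; specifying neither requests nor limits makes it BestEffort.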
+func GetPodQOS(pod *api.Pod) QOSClass {
+ requests := api.ResourceList{}
+ limits := api.ResourceList{}
+ zeroQuantity := resource.MustParse("0")
+ isGuaranteed := true
+ for _, container := range pod.Spec.Containers {
+ // process requests
+ for name, quantity := range container.Resources.Requests {
+ if quantity.Cmp(zeroQuantity) == 1 {
+ delta := quantity.Copy()
+ if _, exists := requests[name]; !exists {
+ requests[name] = *delta
+ } else {
+ delta.Add(requests[name])
+ requests[name] = *delta
+ }
+ }
+ }
+ // process limits
+ for name, quantity := range container.Resources.Limits {
+ if quantity.Cmp(zeroQuantity) == 1 {
+ delta := quantity.Copy()
+ if _, exists := limits[name]; !exists {
+ limits[name] = *delta
+ } else {
+ delta.Add(limits[name])
+ limits[name] = *delta
+ }
+ }
+ }
+ if len(container.Resources.Limits) != len(supportedComputeResources) {
+ isGuaranteed = false
+ }
+ }
+ if len(requests) == 0 && len(limits) == 0 {
+ return BestEffort
+ }
+ // Check if requests match limits for all resources.
+ if isGuaranteed {
+ for name, req := range requests {
+ if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
+ isGuaranteed = false
+ break
+ }
+ }
+ }
+ if isGuaranteed &&
+ len(requests) == len(limits) &&
+ len(limits) == len(supportedComputeResources) {
+ return Guaranteed
+ }
+ return Burstable
+}
+
+// QOSList is a set of (resource name, QoS class) pairs.
+type QOSList map[api.ResourceName]QOSClass
+
+// GetQOS returns a mapping of resource name to QoS class of a container
+func GetQOS(container *api.Container) QOSList {
+ resourceToQOS := QOSList{}
+ for resource := range allResources(container) {
+ switch {
+ case isResourceGuaranteed(container, resource):
+ resourceToQOS[resource] = Guaranteed
+ case isResourceBestEffort(container, resource):
+ resourceToQOS[resource] = BestEffort
+ default:
+ resourceToQOS[resource] = Burstable
+ }
+ }
+ return resourceToQOS
+}
+
+// supportedComputeResources is the list of supported compute resources
+var supportedComputeResources = []api.ResourceName{
+ api.ResourceCPU,
+ api.ResourceMemory,
+}
+
+// allResources returns a map of all supported resources; a resource's value is true
+// if it is present on the container as a request or a limit.
+func allResources(container *api.Container) map[api.ResourceName]bool {
+ resources := map[api.ResourceName]bool{}
+ for _, resource := range supportedComputeResources {
+ resources[resource] = false
+ }
+ for resource := range container.Resources.Requests {
+ resources[resource] = true
+ }
+ for resource := range container.Resources.Limits {
+ resources[resource] = true
+ }
+ return resources
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go
new file mode 100644
index 0000000..e52dece
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/kubelet/qos/types.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package qos
+
+// QOSClass defines the supported qos classes of Pods/Containers.
+type QOSClass string
+
+const (
+ // Guaranteed is the Guaranteed qos class.
+ Guaranteed QOSClass = "Guaranteed"
+ // Burstable is the Burstable qos class.
+ Burstable QOSClass = "Burstable"
+ // BestEffort is the BestEffort qos class.
+ BestEffort QOSClass = "BestEffort"
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/doc.go
new file mode 100644
index 0000000..35ba788
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package labels implements a simple label system, parsing and matching
+// selectors with sets of labels.
+package labels
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/labels.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/labels.go
new file mode 100644
index 0000000..637a45f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/labels.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+ "sort"
+ "strings"
+)
+
+// Labels allows you to present labels independently from their storage.
+type Labels interface {
+ // Has returns whether the provided label exists.
+ Has(label string) (exists bool)
+
+ // Get returns the value for the provided label.
+ Get(label string) (value string)
+}
+
+// Set is a map of label:value. It implements Labels.
+type Set map[string]string
+
+// String returns all labels listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+ selector := make([]string, 0, len(ls))
+ for key, value := range ls {
+ selector = append(selector, key+"="+value)
+ }
+ // Sort for determinism.
+ sort.StringSlice(selector).Sort()
+ return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided label exists in the map.
+func (ls Set) Has(label string) bool {
+ _, exists := ls[label]
+ return exists
+}
+
+// Get returns the value in the map for the provided label.
+func (ls Set) Get(label string) string {
+ return ls[label]
+}
+
+// AsSelector converts labels into a selector.
+func (ls Set) AsSelector() Selector {
+ return SelectorFromSet(ls)
+}
+
+// FormatLabels converts a label map into a plain string.
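+// For example, FormatLabels(map[string]string{"app": "db", "tier": "backend"})
+// returns "app=db,tier=backend", and FormatLabels(nil) returns "<none>".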
+func FormatLabels(labelMap map[string]string) string {
+ l := Set(labelMap).String()
+ if l == "" {
+ l = "<none>"
+ }
+ return l
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/selector.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/selector.go
new file mode 100644
index 0000000..861b6ea
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/labels/selector.go
@@ -0,0 +1,810 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/util/validation"
+)
+
+// Selector represents a label selector.
+type Selector interface {
+ // Matches returns true if this selector matches the given set of labels.
+ Matches(Labels) bool
+
+ // Empty returns true if this selector does not restrict the selection space.
+ Empty() bool
+
+ // String returns a human readable string that represents this selector.
+ String() string
+
+ // Add adds requirements to the Selector
+ Add(r ...Requirement) Selector
+}
+
+// Everything returns a selector that matches all labels.
+func Everything() Selector {
+ return internalSelector{}
+}
+
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Labels) bool { return false }
+func (n nothingSelector) Empty() bool { return false }
+func (n nothingSelector) String() string { return "<null>" }
+func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
+
+// Nothing returns a selector that matches no labels
+func Nothing() Selector {
+ return nothingSelector{}
+}
+
+// Operator represents a key's relationship
+// to a set of values in a Requirement.
+type Operator string
+
+const (
+ DoesNotExistOperator Operator = "!"
+ EqualsOperator Operator = "="
+ DoubleEqualsOperator Operator = "=="
+ InOperator Operator = "in"
+ NotEqualsOperator Operator = "!="
+ NotInOperator Operator = "notin"
+ ExistsOperator Operator = "exists"
+ GreaterThanOperator Operator = "gt"
+ LessThanOperator Operator = "lt"
+)
+
+func NewSelector() Selector {
+ return internalSelector(nil)
+}
+
+type internalSelector []Requirement
+
+// ByKey sorts requirements by key to make parsing deterministic.
+type ByKey []Requirement
+
+func (a ByKey) Len() int { return len(a) }
+
+func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+// Requirement is a selector that contains values, a key
+// and an operator that relates the key and values. The zero
+// value of Requirement is invalid.
+// Requirement implements both set-based and exact matching.
+// Requirement is initialized via the NewRequirement constructor to create a valid Requirement.
+type Requirement struct {
+ key string
+ operator Operator
+ strValues sets.String
+}
+
+// NewRequirement is the constructor for a Requirement.
+// If any of these rules is violated, an error is returned:
+// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist.
+// (2) If the operator is In or NotIn, the values set must be non-empty.
+// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
+// (4) If the operator is Exists or DoesNotExist, the value set must be empty.
+// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
+// (6) The key is invalid due to its length, or sequence
+// of characters. See validateLabelKey for more details.
+//
+// The empty string is a valid value in the input values set.
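+//
+// Illustrative example:
+//  req, err := NewRequirement("tier", InOperator, sets.NewString("frontend", "backend"))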
+func NewRequirement(key string, op Operator, vals sets.String) (*Requirement, error) {
+ if err := validateLabelKey(key); err != nil {
+ return nil, err
+ }
+ switch op {
+ case InOperator, NotInOperator:
+ if len(vals) == 0 {
+ return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty")
+ }
+ case EqualsOperator, DoubleEqualsOperator, NotEqualsOperator:
+ if len(vals) != 1 {
+ return nil, fmt.Errorf("exact-match compatibility requires one single value")
+ }
+ case ExistsOperator, DoesNotExistOperator:
+ if len(vals) != 0 {
+ return nil, fmt.Errorf("values set must be empty for exists and does not exist")
+ }
+ case GreaterThanOperator, LessThanOperator:
+ if len(vals) != 1 {
+ return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required")
+ }
+ for val := range vals {
+ if _, err := strconv.ParseInt(val, 10, 64); err != nil {
+ return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer")
+ }
+ }
+ default:
+ return nil, fmt.Errorf("operator '%v' is not recognized", op)
+ }
+
+ for v := range vals {
+ if err := validateLabelValue(v); err != nil {
+ return nil, err
+ }
+ }
+ return &Requirement{key: key, operator: op, strValues: vals}, nil
+}
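+
+// Example (illustrative sketch): constructing a Requirement and matching it
+// against a label set. It assumes the Set type from this package's labels.go
+// (a map[string]string that implements Labels) and the vendored util/sets
+// package imported above.
+//
+//    env, err := NewRequirement("environment", InOperator, sets.NewString("prod", "qa"))
+//    if err != nil {
+//        // one of the rules documented on NewRequirement was violated
+//    }
+//    env.Matches(Set{"environment": "prod"}) // true
+//    env.Matches(Set{"environment": "dev"})  // false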
+
+// Matches returns true if the Requirement matches the input Labels.
+// There is a match in the following cases:
+// (1) The operator is Exists and Labels has the Requirement's key.
+// (2) The operator is In, Labels has the Requirement's key and Labels'
+// value for that key is in Requirement's value set.
+// (3) The operator is NotIn, Labels has the Requirement's key and
+// Labels' value for that key is not in Requirement's value set.
+// (4) The operator is DoesNotExist or NotIn and Labels does not have the
+// Requirement's key.
+// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has
+// the Requirement's key and the corresponding value satisfies mathematical inequality.
+func (r *Requirement) Matches(ls Labels) bool {
+ switch r.operator {
+ case InOperator, EqualsOperator, DoubleEqualsOperator:
+ if !ls.Has(r.key) {
+ return false
+ }
+ return r.strValues.Has(ls.Get(r.key))
+ case NotInOperator, NotEqualsOperator:
+ if !ls.Has(r.key) {
+ return true
+ }
+ return !r.strValues.Has(ls.Get(r.key))
+ case ExistsOperator:
+ return ls.Has(r.key)
+ case DoesNotExistOperator:
+ return !ls.Has(r.key)
+ case GreaterThanOperator, LessThanOperator:
+ if !ls.Has(r.key) {
+ return false
+ }
+ lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
+ if err != nil {
+ glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
+ return false
+ }
+
+ // There should be only one strValue in r.strValues, and it can be converted to an integer.
+ if len(r.strValues) != 1 {
+ glog.V(10).Infof("Invalid values count %+v of requirement %+v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+ return false
+ }
+
+ var rValue int64
+ for strValue := range r.strValues {
+ rValue, err = strconv.ParseInt(strValue, 10, 64)
+ if err != nil {
+ glog.V(10).Infof("ParseInt failed for value %+v in requirement %+v, for 'Gt', 'Lt' operators, the value must be an integer", strValue, r)
+ return false
+ }
+ }
+ return (r.operator == GreaterThanOperator && lsValue > rValue) || (r.operator == LessThanOperator && lsValue < rValue)
+ default:
+ return false
+ }
+}
+
+func (r *Requirement) Key() string {
+ return r.key
+}
+func (r *Requirement) Operator() Operator {
+ return r.operator
+}
+func (r *Requirement) Values() sets.String {
+ ret := sets.String{}
+ for k := range r.strValues {
+ ret.Insert(k)
+ }
+ return ret
+}
+
+// Empty returns true if the internalSelector doesn't restrict the selection space.
+func (lsel internalSelector) Empty() bool {
+ if lsel == nil {
+ return true
+ }
+ return len(lsel) == 0
+}
+
+// String returns a human-readable string that represents this
+// Requirement. If called on an invalid Requirement, an error is
+// returned. See NewRequirement for creating a valid Requirement.
+func (r *Requirement) String() string {
+ var buffer bytes.Buffer
+ if r.operator == DoesNotExistOperator {
+ buffer.WriteString("!")
+ }
+ buffer.WriteString(r.key)
+
+ switch r.operator {
+ case EqualsOperator:
+ buffer.WriteString("=")
+ case DoubleEqualsOperator:
+ buffer.WriteString("==")
+ case NotEqualsOperator:
+ buffer.WriteString("!=")
+ case InOperator:
+ buffer.WriteString(" in ")
+ case NotInOperator:
+ buffer.WriteString(" notin ")
+ case GreaterThanOperator:
+ buffer.WriteString(">")
+ case LessThanOperator:
+ buffer.WriteString("<")
+ case ExistsOperator, DoesNotExistOperator:
+ return buffer.String()
+ }
+
+ switch r.operator {
+ case InOperator, NotInOperator:
+ buffer.WriteString("(")
+ }
+ if len(r.strValues) == 1 {
+ buffer.WriteString(r.strValues.List()[0])
+ } else { // only > 1 since == 0 prohibited by NewRequirement
+ buffer.WriteString(strings.Join(r.strValues.List(), ","))
+ }
+
+ switch r.operator {
+ case InOperator, NotInOperator:
+ buffer.WriteString(")")
+ }
+ return buffer.String()
+}
+
+// Add adds requirements to the selector. It copies the current selector and returns a new one.
+func (lsel internalSelector) Add(reqs ...Requirement) Selector {
+ var sel internalSelector
+ for ix := range lsel {
+ sel = append(sel, lsel[ix])
+ }
+ for _, r := range reqs {
+ sel = append(sel, r)
+ }
+ sort.Sort(ByKey(sel))
+ return sel
+}
+
+// Matches for an internalSelector returns true if all
+// its Requirements match the input Labels. If any
+// Requirement does not match, false is returned.
+func (lsel internalSelector) Matches(l Labels) bool {
+ for ix := range lsel {
+ if matches := lsel[ix].Matches(l); !matches {
+ return false
+ }
+ }
+ return true
+}
+
+// String returns a comma-separated string of all
+// the internalSelector Requirements' human-readable strings.
+func (lsel internalSelector) String() string {
+ var reqs []string
+ for ix := range lsel {
+ reqs = append(reqs, lsel[ix].String())
+ }
+ return strings.Join(reqs, ",")
+}
+
+// Token represents the type of a token produced by the lexer.
+type Token int
+
+const (
+ ErrorToken Token = iota
+ EndOfStringToken
+ ClosedParToken
+ CommaToken
+ DoesNotExistToken
+ DoubleEqualsToken
+ EqualsToken
+ GreaterThanToken
+ IdentifierToken // to represent keys and values
+ InToken
+ LessThanToken
+ NotEqualsToken
+ NotInToken
+ OpenParToken
+)
+
+// string2token contains the mapping between lexer Token and token literal
+// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense)
+var string2token = map[string]Token{
+ ")": ClosedParToken,
+ ",": CommaToken,
+ "!": DoesNotExistToken,
+ "==": DoubleEqualsToken,
+ "=": EqualsToken,
+ ">": GreaterThanToken,
+ "in": InToken,
+ "<": LessThanToken,
+ "!=": NotEqualsToken,
+ "notin": NotInToken,
+ "(": OpenParToken,
+}
+
+// ScannedItem is the item produced by the lexer. It contains the Token and the literal.
+type ScannedItem struct {
+ tok Token
+ literal string
+}
+
+// isWhitespace returns true if the character is a space, tab, carriage return, or newline.
+func isWhitespace(ch byte) bool {
+ return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'
+}
+
+// isSpecialSymbol detects whether the character ch can be part of an operator.
+func isSpecialSymbol(ch byte) bool {
+ switch ch {
+ case '=', '!', '(', ')', ',', '>', '<':
+ return true
+ }
+ return false
+}
+
+// Lexer represents the lexer for label selectors.
+// It contains the information necessary to tokenize the input string.
+type Lexer struct {
+ // s stores the string to be tokenized
+ s string
+ // pos is the position currently tokenized
+ pos int
+}
+
+// read returns the character at the current position and increments
+// the position, returning 0 once the end of the buffer is reached.
+func (l *Lexer) read() (b byte) {
+ b = 0
+ if l.pos < len(l.s) {
+ b = l.s[l.pos]
+ l.pos++
+ }
+ return b
+}
+
+// unread 'undoes' the last read character
+func (l *Lexer) unread() {
+ l.pos--
+}
+
+// scanIdOrKeyword scans the string to recognize a literal token (for example 'in') or an identifier.
+func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) {
+ var buffer []byte
+IdentifierLoop:
+ for {
+ switch ch := l.read(); {
+ case ch == 0:
+ break IdentifierLoop
+ case isSpecialSymbol(ch) || isWhitespace(ch):
+ l.unread()
+ break IdentifierLoop
+ default:
+ buffer = append(buffer, ch)
+ }
+ }
+ s := string(buffer)
+ if val, ok := string2token[s]; ok { // is a literal token?
+ return val, s
+ }
+ return IdentifierToken, s // otherwise is an identifier
+}
+
+// scanSpecialSymbol scans a string starting with a special symbol.
+// Special symbols identify non-literal operators such as "!=", "==", and "=".
+func (l *Lexer) scanSpecialSymbol() (Token, string) {
+ lastScannedItem := ScannedItem{}
+ var buffer []byte
+SpecialSymbolLoop:
+ for {
+ switch ch := l.read(); {
+ case ch == 0:
+ break SpecialSymbolLoop
+ case isSpecialSymbol(ch):
+ buffer = append(buffer, ch)
+ if token, ok := string2token[string(buffer)]; ok {
+ lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
+ } else if lastScannedItem.tok != 0 {
+ l.unread()
+ break SpecialSymbolLoop
+ }
+ default:
+ l.unread()
+ break SpecialSymbolLoop
+ }
+ }
+ if lastScannedItem.tok == 0 {
+ return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
+ }
+ return lastScannedItem.tok, lastScannedItem.literal
+}
+
+// skipWhiteSpaces consumes all blank characters and
+// returns the first non-blank character.
+func (l *Lexer) skipWhiteSpaces(ch byte) byte {
+ for {
+ if !isWhitespace(ch) {
+ return ch
+ }
+ ch = l.read()
+ }
+}
+
+// Lex returns a pair of Token and literal; the literal is
+// meaningful only for the IdentifierToken token.
+func (l *Lexer) Lex() (tok Token, lit string) {
+ switch ch := l.skipWhiteSpaces(l.read()); {
+ case ch == 0:
+ return EndOfStringToken, ""
+ case isSpecialSymbol(ch):
+ l.unread()
+ return l.scanSpecialSymbol()
+ default:
+ l.unread()
+ return l.scanIdOrKeyword()
+ }
+}
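+
+// Example (illustrative sketch): tokenizing a selector expression. For the
+// input "x in (a)" successive calls to Lex yield IdentifierToken("x"),
+// InToken("in"), OpenParToken("("), IdentifierToken("a"), ClosedParToken(")")
+// and finally EndOfStringToken.
+//
+//    l := &Lexer{s: "x in (a)", pos: 0}
+//    for {
+//        tok, lit := l.Lex()
+//        if tok == EndOfStringToken || tok == ErrorToken {
+//            break
+//        }
+//        fmt.Println(tok, lit)
+//    }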
+
+// Parser holds the state of the label selector parser.
+type Parser struct {
+ l *Lexer
+ scannedItems []ScannedItem
+ position int
+}
+
+// ParserContext represents the context during parsing:
+// some literals, for example 'in' and 'notin', can be recognized
+// either as an operator (for example in 'x in (a)') or as a value
+// (for example in 'value in (in)').
+type ParserContext int
+
+const (
+ KeyAndOperator ParserContext = iota
+ Values
+)
+
+// lookahead returns the current token and literal without incrementing the current position.
+func (p *Parser) lookahead(context ParserContext) (Token, string) {
+ tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
+ if context == Values {
+ switch tok {
+ case InToken, NotInToken:
+ tok = IdentifierToken
+ }
+ }
+ return tok, lit
+}
+
+// consume returns the current token and literal, then increments the position.
+func (p *Parser) consume(context ParserContext) (Token, string) {
+ p.position++
+ tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal
+ if context == Values {
+ switch tok {
+ case InToken, NotInToken:
+ tok = IdentifierToken
+ }
+ }
+ return tok, lit
+}
+
+// scan runs through the input string and stores the ScannedItems in an array
+// so the Parser can then look ahead at and consume the tokens.
+func (p *Parser) scan() {
+ for {
+ token, literal := p.l.Lex()
+ p.scannedItems = append(p.scannedItems, ScannedItem{token, literal})
+ if token == EndOfStringToken {
+ break
+ }
+ }
+}
+
+// parse runs a recursive-descent parser over the
+// input string. It returns a list of Requirement objects.
+func (p *Parser) parse() (internalSelector, error) {
+ p.scan() // init scannedItems
+
+ var requirements internalSelector
+ for {
+ tok, lit := p.lookahead(Values)
+ switch tok {
+ case IdentifierToken, DoesNotExistToken:
+ r, err := p.parseRequirement()
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse requirement: %v", err)
+ }
+ requirements = append(requirements, *r)
+ t, l := p.consume(Values)
+ switch t {
+ case EndOfStringToken:
+ return requirements, nil
+ case CommaToken:
+ t2, l2 := p.lookahead(Values)
+ if t2 != IdentifierToken && t2 != DoesNotExistToken {
+ return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
+ }
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
+ }
+ case EndOfStringToken:
+ return requirements, nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit)
+ }
+ }
+}
+
+func (p *Parser) parseRequirement() (*Requirement, error) {
+ key, operator, err := p.parseKeyAndInferOperator()
+ if err != nil {
+ return nil, err
+ }
+ if operator == ExistsOperator || operator == DoesNotExistOperator { // unary operator inferred from lookahead; no values to parse
+ return NewRequirement(key, operator, nil)
+ }
+ operator, err = p.parseOperator()
+ if err != nil {
+ return nil, err
+ }
+ var values sets.String
+ switch operator {
+ case InOperator, NotInOperator:
+ values, err = p.parseValues()
+ case EqualsOperator, DoubleEqualsOperator, NotEqualsOperator, GreaterThanOperator, LessThanOperator:
+ values, err = p.parseExactValue()
+ }
+ if err != nil {
+ return nil, err
+ }
+ return NewRequirement(key, operator, values)
+}
+
+// parseKeyAndInferOperator parses the key literal.
+// If none of the operators '!', 'in', 'notin', '==', '=', '!=' is found,
+// the 'exists' operator is inferred.
+func (p *Parser) parseKeyAndInferOperator() (string, Operator, error) {
+ var operator Operator
+ tok, literal := p.consume(Values)
+ if tok == DoesNotExistToken {
+ operator = DoesNotExistOperator
+ tok, literal = p.consume(Values)
+ }
+ if tok != IdentifierToken {
+ err := fmt.Errorf("found '%s', expected: identifier", literal)
+ return "", "", err
+ }
+ if err := validateLabelKey(literal); err != nil {
+ return "", "", err
+ }
+ if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken {
+ if operator != DoesNotExistOperator {
+ operator = ExistsOperator
+ }
+ }
+ return literal, operator, nil
+}
+
+// parseOperator parses and returns the binary operator of a requirement,
+// either set-based ('in', 'notin') or exact-match ('=', '==', '!=', '>', '<').
+func (p *Parser) parseOperator() (op Operator, err error) {
+ tok, lit := p.consume(KeyAndOperator)
+ switch tok {
+ // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator
+ case InToken:
+ op = InOperator
+ case EqualsToken:
+ op = EqualsOperator
+ case DoubleEqualsToken:
+ op = DoubleEqualsOperator
+ case GreaterThanToken:
+ op = GreaterThanOperator
+ case LessThanToken:
+ op = LessThanOperator
+ case NotInToken:
+ op = NotInOperator
+ case NotEqualsToken:
+ op = NotEqualsOperator
+ default:
+ return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit)
+ }
+ return op, nil
+}
+
+// parseValues parses the values for set based matching (x,y,z)
+func (p *Parser) parseValues() (sets.String, error) {
+ tok, lit := p.consume(Values)
+ if tok != OpenParToken {
+ return nil, fmt.Errorf("found '%s' expected: '('", lit)
+ }
+ tok, lit = p.lookahead(Values)
+ switch tok {
+ case IdentifierToken, CommaToken:
+ s, err := p.parseIdentifiersList() // handles general cases
+ if err != nil {
+ return s, err
+ }
+ if tok, _ = p.consume(Values); tok != ClosedParToken {
+ return nil, fmt.Errorf("found '%s', expected: ')'", lit)
+ }
+ return s, nil
+ case ClosedParToken: // handles "()"
+ p.consume(Values)
+ return sets.NewString(""), nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
+ }
+}
+
+// parseIdentifiersList parses a (possibly empty) list of
+// comma-separated (possibly empty) identifiers.
+func (p *Parser) parseIdentifiersList() (sets.String, error) {
+ s := sets.NewString()
+ for {
+ tok, lit := p.consume(Values)
+ switch tok {
+ case IdentifierToken:
+ s.Insert(lit)
+ tok2, lit2 := p.lookahead(Values)
+ switch tok2 {
+ case CommaToken:
+ continue
+ case ClosedParToken:
+ return s, nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
+ }
+ case CommaToken: // handled here since we can have "(,"
+ if s.Len() == 0 {
+ s.Insert("") // to handle (,
+ }
+ tok2, _ := p.lookahead(Values)
+ if tok2 == ClosedParToken {
+ s.Insert("") // to handle ,) Double "" removed by StringSet
+ return s, nil
+ }
+ if tok2 == CommaToken {
+ p.consume(Values)
+ s.Insert("") // to handle ,, Double "" removed by StringSet
+ }
+ default: // it can be operator
+ return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
+ }
+ }
+}
+
+// parseExactValue parses the single value for exact-match style.
+func (p *Parser) parseExactValue() (sets.String, error) {
+ s := sets.NewString()
+ tok, lit := p.lookahead(Values)
+ if tok == EndOfStringToken || tok == CommaToken {
+ s.Insert("")
+ return s, nil
+ }
+ tok, lit = p.consume(Values)
+ if tok == IdentifierToken {
+ s.Insert(lit)
+ return s, nil
+ }
+ return nil, fmt.Errorf("found '%s', expected: identifier", lit)
+}
+
+// Parse takes a string representing a selector and returns a selector
+// object, or an error. This parsing function differs from ParseSelector
+// as they parse different selectors with different syntaxes.
+// The input will cause an error if it does not follow this form:
+//
+// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax> ]
+// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
+// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set>
+// <inclusion-exclusion> ::= <inclusion> | <exclusion>
+// <exclusion> ::= "notin"
+// <inclusion> ::= "in"
+// <value-set> ::= "(" <values> ")"
+// <values> ::= VALUE | VALUE "," <values>
+// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t')
+// Example of valid syntax:
+// "x in (foo,,baz),y,z notin ()"
+//
+// Note:
+// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
+// VALUEs in its requirement
+// (2) Exclusion - " notin " - denotes that the KEY is not equal to any
+// of the VALUEs in its requirement or does not exist
+// (3) The empty string is a valid VALUE
+// (4) A requirement with just a KEY - as in "y" above - denotes that
+// the KEY exists and can be any VALUE.
+// (5) A requirement with just !KEY requires that the KEY not exist.
+//
+func Parse(selector string) (Selector, error) {
+ parsedSelector, err := parse(selector)
+ if err == nil {
+ return parsedSelector, nil
+ }
+ return nil, err
+}
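+
+// Example (illustrative sketch): parsing the selector syntax documented above
+// and evaluating it against a label set; Set is assumed to come from this
+// package's labels.go.
+//
+//    sel, err := Parse("x in (foo,,baz),y,z notin ()")
+//    if err != nil {
+//        // the input did not follow the grammar above
+//    }
+//    sel.Matches(Set{"x": "foo", "y": "anything", "z": "1"}) // true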
+
+// parse parses the string representation of the selector and returns the internalSelector struct.
+// The callers of this method can then decide how to return the internalSelector struct to their
+// callers. This function has two callers now, one returns a Selector interface and the other
+// returns a list of requirements.
+func parse(selector string) (internalSelector, error) {
+ p := &Parser{l: &Lexer{s: selector, pos: 0}}
+ items, err := p.parse()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(ByKey(items)) // sort to guarantee deterministic parsing
+ return internalSelector(items), err
+}
+
+func validateLabelKey(k string) error {
+ if errs := validation.IsQualifiedName(k); len(errs) != 0 {
+ return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; "))
+ }
+ return nil
+}
+
+func validateLabelValue(v string) error {
+ if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+ return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+ }
+ return nil
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+func SelectorFromSet(ls Set) Selector {
+ if ls == nil {
+ return internalSelector{}
+ }
+ var requirements internalSelector
+ for label, value := range ls {
+ if r, err := NewRequirement(label, EqualsOperator, sets.NewString(value)); err != nil {
+ //TODO: double check errors when input comes from serialization?
+ return internalSelector{}
+ } else {
+ requirements = append(requirements, *r)
+ }
+ }
+ // sort to have deterministic string representation
+ sort.Sort(ByKey(requirements))
+ return internalSelector(requirements)
+}
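+
+// Example (illustrative sketch): building an equality-based selector from a
+// Set and rendering its canonical, key-sorted string form.
+//
+//    sel := SelectorFromSet(Set{"tier": "frontend", "app": "guestbook"})
+//    sel.String()                                                            // "app=guestbook,tier=frontend"
+//    sel.Matches(Set{"app": "guestbook", "tier": "frontend", "env": "prod"}) // true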
+
+// ParseToRequirements takes a string representing a selector and returns a list of
+// requirements. This function is suitable for those callers that perform additional
+// processing on selector requirements.
+// See the documentation for Parse() function for more details.
+// TODO: Consider exporting the internalSelector type instead.
+func ParseToRequirements(selector string) ([]Requirement, error) {
+ return parse(selector)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go
new file mode 100644
index 0000000..a2a0021
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package ports defines ports used by various pieces of the kubernetes
+// infrastructure.
+package ports
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go
new file mode 100644
index 0000000..9c597ba
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ports
+
+const (
+ // ProxyStatusPort is the default port for the proxy healthz server.
+ // May be overridden by a flag at startup.
+ ProxyStatusPort = 10249
+ // KubeletPort is the default port for the kubelet server on each host machine.
+ // May be overridden by a flag at startup.
+ KubeletPort = 10250
+ // SchedulerPort is the default port for the scheduler status server.
+ // May be overridden by a flag at startup.
+ SchedulerPort = 10251
+ // ControllerManagerPort is the default port for the controller manager status server.
+ // May be overridden by a flag at startup.
+ ControllerManagerPort = 10252
+ // FlannelDaemonPort is the default port for the flannel daemon.
+ FlannelDaemonPort = 10253
+ // KubeletReadOnlyPort exposes basic read-only services from the kubelet.
+ // May be overridden by a flag at startup.
+ // This is necessary for heapster to collect monitoring stats from the kubelet
+ // until heapster can transition to using the SSL endpoint.
+ // TODO(roberthbailey): Remove this once we have a better solution for heapster.
+ KubeletReadOnlyPort = 10255
+)
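+
+// Example (illustrative sketch): these constants are typically combined with a
+// node address when building status URLs; nodeIP below is a hypothetical value
+// supplied by the caller.
+//
+//    healthz := fmt.Sprintf("http://%s:%d/healthz", nodeIP, ProxyStatusPort)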
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS
new file mode 100644
index 0000000..d038b5e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS
@@ -0,0 +1,5 @@
+assignees:
+ - caesarxuchao
+ - deads2k
+ - lavalamp
+ - smarterclayton
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec.go
new file mode 100644
index 0000000..3f7681c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/url"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion/queryparams"
+)
+
+// codec binds an encoder and decoder.
+type codec struct {
+ Encoder
+ Decoder
+}
+
+// NewCodec creates a Codec from an Encoder and Decoder.
+func NewCodec(e Encoder, d Decoder) Codec {
+ return codec{e, d}
+}
+
+// Encode is a convenience wrapper for encoding to a []byte from an Encoder
+func Encode(e Encoder, obj Object) ([]byte, error) {
+ // TODO: reuse buffer
+ buf := &bytes.Buffer{}
+ if err := e.Encode(obj, buf); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// Decode is a convenience wrapper for decoding data into an Object.
+func Decode(d Decoder, data []byte) (Object, error) {
+ obj, _, err := d.Decode(data, nil, nil)
+ return obj, err
+}
+
+// DecodeInto performs a Decode into the provided object.
+func DecodeInto(d Decoder, data []byte, into Object) error {
+ out, gvk, err := d.Decode(data, nil, into)
+ if err != nil {
+ return err
+ }
+ if out != into {
+ return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into))
+ }
+ return nil
+}
+
+// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests.
+func EncodeOrDie(e Encoder, obj Object) string {
+ bytes, err := Encode(e, obj)
+ if err != nil {
+ panic(err)
+ }
+ return string(bytes)
+}
+
+// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or
+// invokes the ObjectCreater to instantiate a new gvk. Returns an error if the typer cannot find the object.
+func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk unversioned.GroupVersionKind, obj Object) (Object, error) {
+ if obj != nil {
+ into, _, err := t.ObjectKinds(obj)
+ if err != nil {
+ return nil, err
+ }
+ if gvk == into[0] {
+ return obj, nil
+ }
+ }
+ return c.New(gvk)
+}
+
+// NoopEncoder converts a Decoder to a Serializer or Codec for code that expects them but only uses decoding.
+type NoopEncoder struct {
+ Decoder
+}
+
+var _ Serializer = NoopEncoder{}
+
+func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
+ return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
+}
+
+// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
+type NoopDecoder struct {
+ Encoder
+}
+
+var _ Serializer = NoopDecoder{}
+
+func (n NoopDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) {
+ return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder))
+}
+
+// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.
+func NewParameterCodec(scheme *Scheme) ParameterCodec {
+ return &parameterCodec{
+ typer: scheme,
+ convertor: scheme,
+ creator: scheme,
+ }
+}
+
+// parameterCodec implements conversion to and from query parameters and objects.
+type parameterCodec struct {
+ typer ObjectTyper
+ convertor ObjectConvertor
+ creator ObjectCreater
+}
+
+var _ ParameterCodec = &parameterCodec{}
+
+// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then
+// converts that object to into (if necessary). Returns an error if the operation cannot be completed.
+func (c *parameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into Object) error {
+ if len(parameters) == 0 {
+ return nil
+ }
+ targetGVKs, _, err := c.typer.ObjectKinds(into)
+ if err != nil {
+ return err
+ }
+ targetGVK := targetGVKs[0]
+ if targetGVK.GroupVersion() == from {
+ return c.convertor.Convert(&parameters, into)
+ }
+ input, err := c.creator.New(from.WithKind(targetGVK.Kind))
+ if err != nil {
+ return err
+ }
+ if err := c.convertor.Convert(&parameters, input); err != nil {
+ return err
+ }
+ return c.convertor.Convert(input, into)
+}
+
+// EncodeParameters converts the provided object into the to version, then converts that object to url.Values.
+// Returns an error if conversion is not possible.
+func (c *parameterCodec) EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) {
+ gvks, _, err := c.typer.ObjectKinds(obj)
+ if err != nil {
+ return nil, err
+ }
+ gvk := gvks[0]
+ if to != gvk.GroupVersion() {
+ out, err := c.convertor.ConvertToVersion(obj, to)
+ if err != nil {
+ return nil, err
+ }
+ obj = out
+ }
+ return queryparams.Convert(obj)
+}
+
+type base64Serializer struct {
+ Serializer
+}
+
+func NewBase64Serializer(s Serializer) Serializer {
+ return &base64Serializer{s}
+}
+
+func (s base64Serializer) Encode(obj Object, stream io.Writer) error {
+ e := base64.NewEncoder(base64.StdEncoding, stream)
+ err := s.Serializer.Encode(obj, e)
+ e.Close()
+ return err
+}
+
+func (s base64Serializer) Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) {
+ out := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
+ n, err := base64.StdEncoding.Decode(out, data)
+ if err != nil {
+ return nil, nil, err
+ }
+ return s.Serializer.Decode(out[:n], defaults, into)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go
new file mode 100644
index 0000000..b012696
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// CheckCodec makes sure that the codec can encode objects like internalType,
+// decode all of the external types listed, and also decode them into the given
+// object. (Will modify internalType.) (Assumes JSON serialization.)
+// TODO: verify that the correct external version is chosen on encode...
+func CheckCodec(c Codec, internalType Object, externalTypes ...unversioned.GroupVersionKind) error {
+ _, err := Encode(c, internalType)
+ if err != nil {
+ return fmt.Errorf("Internal type not encodable: %v", err)
+ }
+ for _, et := range externalTypes {
+ exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String()))
+ obj, err := Decode(c, exBytes)
+ if err != nil {
+ return fmt.Errorf("external type %s not interpretable: %v", et, err)
+ }
+ if reflect.TypeOf(obj) != reflect.TypeOf(internalType) {
+ return fmt.Errorf("decode of external type %s produced: %#v", et, obj)
+ }
+ err = DecodeInto(c, exBytes, internalType)
+ if err != nil {
+ return fmt.Errorf("external type %s not convertable to internal type: %v", et, err)
+ }
+ }
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go
new file mode 100644
index 0000000..dd6e26a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Defines conversions between generic types and structs to map query strings
+// to struct objects.
+package runtime
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/conversion"
+)
+
+// JSONKeyMapper uses the struct tags on a conversion to determine the key value for
+// the other side. Use when mapping from a map[string]* to a struct or vice versa.
+func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) {
+ if s := destTag.Get("json"); len(s) > 0 {
+ return strings.SplitN(s, ",", 2)[0], key
+ }
+ if s := sourceTag.Get("json"); len(s) > 0 {
+ return key, strings.SplitN(s, ",", 2)[0]
+ }
+ return key, key
+}
+
+// DefaultStringConversions are helpers for converting []string and string to real values.
+var DefaultStringConversions = []interface{}{
+ Convert_Slice_string_To_string,
+ Convert_Slice_string_To_int,
+ Convert_Slice_string_To_bool,
+ Convert_Slice_string_To_int64,
+}
+
+func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error {
+ if len(*input) == 0 {
+ *out = ""
+ return nil
+ }
+ *out = (*input)[0]
+ return nil
+}
+
+func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error {
+ if len(*input) == 0 {
+ *out = 0
+ return nil
+ }
+ str := (*input)[0]
+ i, err := strconv.Atoi(str)
+ if err != nil {
+ return err
+ }
+ *out = i
+ return nil
+}
+
+// Convert_Slice_string_To_bool converts a string parameter to a boolean.
+// Only the absence of a value, a value of "false", or a value of "0" resolve to false.
+// Any other value (including empty string) resolves to true.
+func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error {
+ if len(*input) == 0 {
+ *out = false
+ return nil
+ }
+ switch strings.ToLower((*input)[0]) {
+ case "false", "0":
+ *out = false
+ default:
+ *out = true
+ }
+ return nil
+}
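+
+// Example (illustrative sketch): the bool conversion treats a missing value,
+// "false" and "0" as false and everything else (including "") as true; the
+// conversion.Scope argument is unused here, so nil is passed.
+//
+//    var b bool
+//    _ = Convert_Slice_string_To_bool(&[]string{"0"}, &b, nil) // b == false
+//    _ = Convert_Slice_string_To_bool(&[]string{""}, &b, nil)  // b == true
+//    _ = Convert_Slice_string_To_bool(&[]string{}, &b, nil)    // b == false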
+
+func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error {
+ if len(*input) == 0 {
+ *out = 0
+ return nil
+ }
+ str := (*input)[0]
+ i, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *out = i
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go
new file mode 100644
index 0000000..6f5f613
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go
@@ -0,0 +1,63 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package runtime
+
+import (
+ conversion "k8s.io/kubernetes/pkg/conversion"
+)
+
+func DeepCopy_runtime_RawExtension(in RawExtension, out *RawExtension, c *conversion.Cloner) error {
+ if in.Raw != nil {
+ in, out := in.Raw, &out.Raw
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Raw = nil
+ }
+ if in.Object == nil {
+ out.Object = nil
+ } else if newVal, err := c.DeepCopy(in.Object); err != nil {
+ return err
+ } else {
+ out.Object = newVal.(Object)
+ }
+ return nil
+}
+
+func DeepCopy_runtime_TypeMeta(in TypeMeta, out *TypeMeta, c *conversion.Cloner) error {
+ out.APIVersion = in.APIVersion
+ out.Kind = in.Kind
+ return nil
+}
+
+func DeepCopy_runtime_Unknown(in Unknown, out *Unknown, c *conversion.Cloner) error {
+ out.TypeMeta = in.TypeMeta
+ if in.Raw != nil {
+ in, out := in.Raw, &out.Raw
+ *out = make([]byte, len(in))
+ copy(*out, in)
+ } else {
+ out.Raw = nil
+ }
+ out.ContentEncoding = in.ContentEncoding
+ out.ContentType = in.ContentType
+ return nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/doc.go
new file mode 100644
index 0000000..a9d084d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/doc.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package runtime includes helper functions for working with API objects
+// that follow the kubernetes API object conventions, which are:
+//
+// 0. Your API objects have a common metadata struct member, TypeMeta.
+// 1. Your code refers to an internal set of API objects.
+// 2. In a separate package, you have an external set of API objects.
+// 3. The external set is considered to be versioned, and no breaking
+// changes are ever made to it (fields may be added but not changed
+// or removed).
+// 4. As your api evolves, you'll make an additional versioned package
+// with every major change.
+// 5. Versioned packages have conversion functions which convert to
+// and from the internal version.
+// 6. You'll continue to support older versions according to your
+// deprecation policy, and you can easily provide a program/library
+// to update old versions into new versions because of 5.
+// 7. All of your serializations and deserializations are handled in a
+// centralized place.
+//
+// Package runtime provides a conversion helper to make 5 easy, and the
+// Encode/Decode/DecodeInto trio to accomplish 7. You can also register
+// additional "codecs" which use a version of your choice. It's
+// recommended that you register your types with runtime in your
+// package's init function.
+//
+// As a bonus, a few common types useful from all api objects and versions
+// are provided in types.go.
+
+package runtime
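+
+// A minimal sketch of the registration recommended above, assuming a
+// hypothetical MyObject type that implements Object and the AddKnownTypes
+// helper on this package's Scheme:
+//
+//    var scheme = NewScheme()
+//
+//    func init() {
+//        scheme.AddKnownTypes(unversioned.GroupVersion{Group: "example.com", Version: "v1"}, &MyObject{})
+//    }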
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go
new file mode 100644
index 0000000..eb1f573
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "errors"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+)
+
+type encodable struct {
+ E Encoder `json:"-"`
+ obj Object
+ versions []unversioned.GroupVersion
+}
+
+func (e encodable) GetObjectKind() unversioned.ObjectKind { return e.obj.GetObjectKind() }
+
+// NewEncodable creates an object that will be encoded with the provided codec on demand.
+// Provided as a convenience for test cases dealing with internal objects.
+func NewEncodable(e Encoder, obj Object, versions ...unversioned.GroupVersion) Object {
+ if _, ok := obj.(*Unknown); ok {
+ return obj
+ }
+ return encodable{e, obj, versions}
+}
+
+func (re encodable) UnmarshalJSON(in []byte) error {
+ return errors.New("runtime.encodable cannot be unmarshalled from JSON")
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (re encodable) MarshalJSON() ([]byte, error) {
+ return Encode(re.E, re.obj)
+}
+
+// NewEncodableList creates an object that will be encoded with the provided codec on demand.
+// Provided as a convenience for test cases dealing with internal objects.
+func NewEncodableList(e Encoder, objects []Object, versions ...unversioned.GroupVersion) []Object {
+ out := make([]Object, len(objects))
+ for i := range objects {
+ if _, ok := objects[i].(*Unknown); ok {
+ out[i] = objects[i]
+ continue
+ }
+ out[i] = NewEncodable(e, objects[i], versions...)
+ }
+ return out
+}
+
+func (re *Unknown) UnmarshalJSON(in []byte) error {
+ if re == nil {
+ return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer")
+ }
+ re.TypeMeta = TypeMeta{}
+ re.Raw = append(re.Raw[0:0], in...)
+ re.ContentEncoding = ""
+ re.ContentType = ContentTypeJSON
+ return nil
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (re Unknown) MarshalJSON() ([]byte, error) {
+ // If ContentType is unset, we assume this is JSON.
+ if re.ContentType != "" && re.ContentType != ContentTypeJSON {
+ return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data")
+ }
+ if re.Raw == nil {
+ return []byte("null"), nil
+ }
+ return re.Raw, nil
+}
+
+func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error {
+ if in == nil {
+ out.Raw = []byte("null")
+ return nil
+ }
+ obj := *in
+ if unk, ok := obj.(*Unknown); ok {
+ if unk.Raw != nil {
+ out.Raw = unk.Raw
+ return nil
+ }
+ obj = out.Object
+ }
+ if obj == nil {
+ out.Raw = nil
+ return nil
+ }
+ out.Object = obj
+ return nil
+}
+
+func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error {
+ if in.Object != nil {
+ *out = in.Object
+ return nil
+ }
+ data := in.Raw
+ if len(data) == 0 || (len(data) == 4 && string(data) == "null") {
+ *out = nil
+ return nil
+ }
+ *out = &Unknown{
+ Raw: data,
+ // TODO: Set ContentEncoding and ContentType appropriately.
+ // Currently we set ContentTypeJSON to make tests passing.
+ ContentType: ContentTypeJSON,
+ }
+ return nil
+}
+
+func DefaultEmbeddedConversions() []interface{} {
+ return []interface{}{
+ Convert_runtime_Object_To_runtime_RawExtension,
+ Convert_runtime_RawExtension_To_runtime_Object,
+ }
+}
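+
+// Example (illustrative sketch): converting a RawExtension carrying raw JSON
+// back into an Object; the conversion ignores its Scope argument, so nil is
+// passed.
+//
+//    var obj Object
+//    _ = Convert_runtime_RawExtension_To_runtime_Object(&RawExtension{Raw: []byte(`{"kind":"Pod"}`)}, &obj, nil)
+//    // obj is now an *Unknown holding the raw bytes, with ContentType set to ContentTypeJSON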
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/error.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/error.go
new file mode 100644
index 0000000..4041b4d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/error.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+type notRegisteredErr struct {
+ gvk unversioned.GroupVersionKind
+ t reflect.Type
+}
+
+// NewNotRegisteredErr is exposed for testing.
+func NewNotRegisteredErr(gvk unversioned.GroupVersionKind, t reflect.Type) error {
+ return &notRegisteredErr{gvk: gvk, t: t}
+}
+
+func (k *notRegisteredErr) Error() string {
+ if k.t != nil {
+ return fmt.Sprintf("no kind is registered for the type %v", k.t)
+ }
+ if len(k.gvk.Kind) == 0 {
+ return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion())
+ }
+ if k.gvk.Version == APIVersionInternal {
+ return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group)
+ }
+
+ return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion())
+}
+
+// IsNotRegisteredError returns true if the error indicates the provided
+// object or input data is not registered.
+func IsNotRegisteredError(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*notRegisteredErr)
+ return ok
+}
+
+type missingKindErr struct {
+ data string
+}
+
+func NewMissingKindErr(data string) error {
+ return &missingKindErr{data}
+}
+
+func (k *missingKindErr) Error() string {
+ return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data)
+}
+
+// IsMissingKind returns true if the error indicates that the provided object
+// is missing a 'Kind' field.
+func IsMissingKind(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*missingKindErr)
+ return ok
+}
+
+type missingVersionErr struct {
+ data string
+}
+
+// NewMissingVersionErr returns an error indicating that the provided data is
+// missing an 'apiVersion' field.
+func NewMissingVersionErr(data string) error {
+ return &missingVersionErr{data}
+}
+
+func (k *missingVersionErr) Error() string {
+ return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data)
+}
+
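+// IsMissingVersion returns true if the error indicates that the provided data
+// is missing an 'apiVersion' field.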
+func IsMissingVersion(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*missingVersionErr)
+ return ok
+}
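+
+// Example (illustrative sketch): the typed errors above are matched with the
+// Is* helpers rather than by comparing error strings.
+//
+//    err := NewMissingKindErr(`{"apiVersion":"v1"}`)
+//    IsMissingKind(err)    // true
+//    IsMissingVersion(err) // false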
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/extension.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/extension.go
new file mode 100644
index 0000000..4d23ee9
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/extension.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+func (re *RawExtension) UnmarshalJSON(in []byte) error {
+ if re == nil {
+ return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
+ }
+ re.Raw = append(re.Raw[0:0], in...)
+ return nil
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (re RawExtension) MarshalJSON() ([]byte, error) {
+ if re.Raw == nil {
+ // TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which
+ // expect to call json.Marshal on arbitrary versioned objects (even those not in
+ // the scheme). pkg/kubectl/resource#AsVersionedObjects and its interaction with
+ // kubectl get on objects not in the scheme needs to be updated to ensure that the
+ // objects that are not part of the scheme are correctly put into the right form.
+ if re.Object != nil {
+ return json.Marshal(re.Object)
+ }
+ return []byte("null"), nil
+ }
+ // TODO: Check whether ContentType is actually JSON before returning it.
+ return re.Raw, nil
+}
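+
+// Example (illustrative sketch): RawExtension defers decoding by capturing the
+// raw bytes on unmarshal and replaying them on marshal.
+//
+//    var re RawExtension
+//    _ = json.Unmarshal([]byte(`{"kind":"Pod"}`), &re)
+//    out, _ := json.Marshal(&re) // out == []byte(`{"kind":"Pod"}`)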
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go
new file mode 100644
index 0000000..889dbfb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go
@@ -0,0 +1,689 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/runtime/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package runtime is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/runtime/generated.proto
+
+ It has these top-level messages:
+ RawExtension
+ TypeMeta
+ Unknown
+*/
+package runtime
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *RawExtension) Reset() { *m = RawExtension{} }
+func (m *RawExtension) String() string { return proto.CompactTextString(m) }
+func (*RawExtension) ProtoMessage() {}
+
+func (m *TypeMeta) Reset() { *m = TypeMeta{} }
+func (m *TypeMeta) String() string { return proto.CompactTextString(m) }
+func (*TypeMeta) ProtoMessage() {}
+
+func (m *Unknown) Reset() { *m = Unknown{} }
+func (m *Unknown) String() string { return proto.CompactTextString(m) }
+func (*Unknown) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*RawExtension)(nil), "k8s.io.kubernetes.pkg.runtime.RawExtension")
+ proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.runtime.TypeMeta")
+ proto.RegisterType((*Unknown)(nil), "k8s.io.kubernetes.pkg.runtime.Unknown")
+}
+func (m *RawExtension) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *RawExtension) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Raw != nil {
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Raw)))
+ i += copy(data[i:], m.Raw)
+ }
+ return i, nil
+}
+
+func (m *TypeMeta) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *TypeMeta) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion)))
+ i += copy(data[i:], m.APIVersion)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Kind)))
+ i += copy(data[i:], m.Kind)
+ return i, nil
+}
+
+func (m *Unknown) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Unknown) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size()))
+ n1, err := m.TypeMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ if m.Raw != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Raw)))
+ i += copy(data[i:], m.Raw)
+ }
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
+ i += copy(data[i:], m.ContentEncoding)
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
+ i += copy(data[i:], m.ContentType)
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *RawExtension) Size() (n int) {
+ var l int
+ _ = l
+ if m.Raw != nil {
+ l = len(m.Raw)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *TypeMeta) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Unknown) Size() (n int) {
+ var l int
+ _ = l
+ l = m.TypeMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Raw != nil {
+ l = len(m.Raw)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.ContentEncoding)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ContentType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RawExtension) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RawExtension: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...)
+ if m.Raw == nil {
+ m.Raw = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TypeMeta) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Unknown) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Unknown: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...)
+ if m.Raw == nil {
+ m.Raw = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContentEncoding = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContentType = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
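skipGenerated walks a protobuf stream one field at a time: every field begins with a varint tag whose low three bits select the wire type and whose remaining bits carry the field number, which is exactly how the Unmarshal methods above split wire into fieldNum and wireType. A small illustrative helper (splitTag is a made-up name, not part of the vendored file):

// splitTag separates a decoded protobuf tag into its field number and wire type.
func splitTag(tag uint64) (fieldNum int32, wireType int) {
	return int32(tag >> 3), int(tag & 0x7)
}

// For example, splitTag(0x0a) yields field 1 with wire type 2 (length-delimited),
// the tag used for the Raw bytes field of RawExtension above.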
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto
new file mode 100644
index 0000000..0e602ab
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto
@@ -0,0 +1,124 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.runtime;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "runtime";
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+// type MyAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// MyPlugin runtime.Object `json:"myPlugin"`
+// }
+// type PluginA struct {
+// AOption string `json:"aOption"`
+// }
+//
+// // External package:
+// type MyAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+// type PluginA struct {
+// AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+// {
+// "kind":"MyAPIObject",
+// "apiVersion":"v1",
+// "myPlugin": {
+// "kind":"PluginA",
+// "aOption":"foo",
+// },
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+message RawExtension {
+ // Raw is the underlying serialization of this object.
+ //
+ // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+ optional bytes raw = 1;
+}
+
+// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
+// like this:
+// type MyAwesomeAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// ... // other fields
+// }
+// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
+//
+// TypeMeta is provided here for convenience. You may use it directly from this package or define
+// your own with the same fields.
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+message TypeMeta {
+ optional string apiVersion = 1;
+
+ optional string kind = 2;
+}
+
+// Unknown allows API objects with unknown types to be passed through. This can be used
+// to deal with the API objects from a plug-in. Unknown objects still have functioning
+// TypeMeta features -- kind, version, etc.
+// TODO: Make this object have easy access to field-based accessors and setters for
+// metadata and field mutation.
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+message Unknown {
+ optional TypeMeta typeMeta = 1;
+
+ // Raw will hold the complete serialized object which couldn't be matched
+ // with a registered type. Most likely, nothing should be done with this
+ // except for passing it through the system.
+ optional bytes raw = 2;
+
+ // ContentEncoding is the encoding used to encode 'Raw' data.
+ // Unspecified means no encoding.
+ optional string contentEncoding = 3;
+
+ // ContentType is the serialization method used to serialize 'Raw'.
+ // Unspecified means ContentTypeJSON.
+ optional string contentType = 4;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/helper.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/helper.go
new file mode 100644
index 0000000..827cff1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/helper.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/util/errors"
+)
+
+// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path.
+type unsafeObjectConvertor struct {
+ *Scheme
+}
+
+var _ ObjectConvertor = unsafeObjectConvertor{}
+
+// ConvertToVersion converts in to the provided outVersion without copying the input first, which
+// is only safe if the output object is not mutated or reused.
+func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) {
+ return c.Scheme.UnsafeConvertToVersion(in, outVersion)
+}
+
+// UnsafeObjectConvertor performs object conversion without copying the object structure,
+// for use when the converted object will not be reused or mutated. Primarily for use within
+// versioned codecs, which use the external object for serialization but do not return it.
+func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor {
+ return unsafeObjectConvertor{scheme}
+}
+
+// SetField puts the value of src, into fieldName, which must be a member of v.
+// The value of src must be assignable to the field.
+func SetField(src interface{}, v reflect.Value, fieldName string) error {
+ field := v.FieldByName(fieldName)
+ if !field.IsValid() {
+ return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+ }
+ srcValue := reflect.ValueOf(src)
+ if srcValue.Type().AssignableTo(field.Type()) {
+ field.Set(srcValue)
+ return nil
+ }
+ if srcValue.Type().ConvertibleTo(field.Type()) {
+ field.Set(srcValue.Convert(field.Type()))
+ return nil
+ }
+ return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type())
+}
+
+// Field puts the value of fieldName, which must be a member of v, into dest,
+// which must be a variable to which this field's value can be assigned.
+func Field(v reflect.Value, fieldName string, dest interface{}) error {
+ field := v.FieldByName(fieldName)
+ if !field.IsValid() {
+ return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+ }
+ destValue, err := conversion.EnforcePtr(dest)
+ if err != nil {
+ return err
+ }
+ if field.Type().AssignableTo(destValue.Type()) {
+ destValue.Set(field)
+ return nil
+ }
+ if field.Type().ConvertibleTo(destValue.Type()) {
+ destValue.Set(field.Convert(destValue.Type()))
+ return nil
+ }
+ return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type())
+}
+
+// FieldPtr puts the address of fieldName, which must be a member of v,
+// into dest, which must be an address of a variable to which this field's
+// address can be assigned.
+func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error {
+ field := v.FieldByName(fieldName)
+ if !field.IsValid() {
+ return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
+ }
+ v, err := conversion.EnforcePtr(dest)
+ if err != nil {
+ return err
+ }
+ field = field.Addr()
+ if field.Type().AssignableTo(v.Type()) {
+ v.Set(field)
+ return nil
+ }
+ if field.Type().ConvertibleTo(v.Type()) {
+ v.Set(field.Convert(v.Type()))
+ return nil
+ }
+ return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type())
+}
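SetField, Field, and FieldPtr all resolve a struct field by name via reflection and then copy a value in or out, accepting either assignable or convertible types. A hedged usage sketch, assuming it lives in this package (the widget type is made up for illustration):

type widget struct {
	Name  string
	Count int
}

func fieldHelpersExample() error {
	w := widget{Name: "kube2msb", Count: 1}
	v := reflect.ValueOf(&w).Elem() // must be addressable for SetField to work

	// Copy the Name field out into a plain variable.
	var name string
	if err := Field(v, "Name", &name); err != nil {
		return err
	}

	// Overwrite Count from a source value assignable to the field's type.
	return SetField(42, v, "Count")
}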
+
+// EncodeList ensures that each object in an array is converted to an Unknown{} in serialized form.
+// TODO: accept a content type.
+func EncodeList(e Encoder, objects []Object) error {
+ var errs []error
+ for i := range objects {
+ data, err := Encode(e, objects[i])
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ // TODO: Set ContentEncoding and ContentType.
+ objects[i] = &Unknown{Raw: data}
+ }
+ return errors.NewAggregate(errs)
+}
+
+func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) {
+ for _, decoder := range decoders {
+ // TODO: Decode based on ContentType.
+ obj, err := Decode(decoder, obj.Raw)
+ if err != nil {
+ if IsNotRegisteredError(err) {
+ continue
+ }
+ return nil, err
+ }
+ return obj, nil
+ }
+ // could not decode, so leave the object as Unknown, but give the decoders the
+ // chance to set Unknown.TypeMeta if it is available.
+ for _, decoder := range decoders {
+ if err := DecodeInto(decoder, obj.Raw, obj); err == nil {
+ return obj, nil
+ }
+ }
+ return obj, nil
+}
+
+// DecodeList alters the list in place, attempting to decode any objects found in
+// the list that have the Unknown type. Any errors that occur are returned
+// after the entire list is processed. Decoders are tried in order.
+func DecodeList(objects []Object, decoders ...Decoder) []error {
+ errs := []error(nil)
+ for i, obj := range objects {
+ switch t := obj.(type) {
+ case *Unknown:
+ decoded, err := decodeListItem(t, decoders)
+ if err != nil {
+ errs = append(errs, err)
+ break
+ }
+ objects[i] = decoded
+ }
+ }
+ return errs
+}
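Together, EncodeList and DecodeList let a caller serialize a mixed list into Unknown wrappers and later decode it back in place. A hedged round-trip sketch, assuming a Codec (which is both an Encoder and a Decoder) is available:

func roundTripList(codec Codec, objects []Object) error {
	// After EncodeList, every element is a *Unknown holding the serialized bytes.
	if err := EncodeList(codec, objects); err != nil {
		return err
	}
	// DecodeList replaces each decodable *Unknown with its typed object.
	if errs := DecodeList(objects, codec); len(errs) != 0 {
		return errors.NewAggregate(errs)
	}
	return nil
}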
+
+// MultiObjectTyper returns the types of objects across multiple schemes in order.
+type MultiObjectTyper []ObjectTyper
+
+var _ ObjectTyper = MultiObjectTyper{}
+
+func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []unversioned.GroupVersionKind, unversionedType bool, err error) {
+ for _, t := range m {
+ gvks, unversionedType, err = t.ObjectKinds(obj)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+func (m MultiObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool {
+ for _, t := range m {
+ if t.Recognizes(gvk) {
+ return true
+ }
+ }
+ return false
+}
+
+// SetZeroValue would set the object of objPtr to zero value of its type.
+func SetZeroValue(objPtr Object) error {
+ v, err := conversion.EnforcePtr(objPtr)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.Zero(v.Type()))
+ return nil
+}
+
+// DefaultFramer is valid for any stream that can read objects serially without
+// any separation in the stream.
+var DefaultFramer = defaultFramer{}
+
+type defaultFramer struct{}
+
+func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r }
+func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w }
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go
new file mode 100644
index 0000000..e463246
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go
@@ -0,0 +1,217 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "io"
+ "net/url"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+const (
+ // APIVersionInternal may be used if you are registering a type that should not
+ // be considered stable or serialized - it is a convention only and has no
+ // special behavior in this package.
+ APIVersionInternal = "__internal"
+)
+
+type Encoder interface {
+ // Encode writes an object to a stream. Implementations may return errors if the versions are
+ // incompatible, or if no conversion is defined.
+ Encode(obj Object, w io.Writer) error
+}
+
+type Decoder interface {
+ // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the
+ // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and
+ // version from the serialized data, or an error. If into is non-nil, it will be used as the target type
+ // and implementations may choose to use it rather than reallocating an object. However, the object is not
+ // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are
+ // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the
+ // type of the into may be used to guide conversion decisions.
+ Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error)
+}
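Because a payload may carry no type information, callers of Decode can supply a default GroupVersionKind and, optionally, an object to decode into. A hedged sketch (the group, version, and kind values are assumptions for illustration):

func decodeWithDefault(d Decoder, data []byte) (Object, error) {
	gvk := unversioned.GroupVersionKind{Group: "example.k8s.io", Version: "v1", Kind: "Carp"}
	// Passing nil for into asks the decoder to allocate a fresh object;
	// pass a pointer here to reuse an existing one instead.
	obj, _, err := d.Decode(data, &gvk, nil)
	return obj, err
}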
+
+// Serializer is the core interface for transforming objects into a serialized format and back.
+// Implementations may choose to perform conversion of the object, but no assumptions should be made.
+type Serializer interface {
+ Encoder
+ Decoder
+}
+
+// Codec is a Serializer that deals with the details of versioning objects. It offers the same
+// interface as Serializer, so this is a marker to consumers that care about the version of the objects
+// they receive.
+type Codec Serializer
+
+// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and
+// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing
+// and the desired version must be specified.
+type ParameterCodec interface {
+ // DecodeParameters takes the given url.Values in the specified group version and decodes them
+ // into the provided object, or returns an error.
+ DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into Object) error
+ // EncodeParameters encodes the provided object as query parameters or returns an error.
+ EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error)
+}
+
+// Framer is a factory for creating readers and writers that obey a particular framing pattern.
+type Framer interface {
+ NewFrameReader(r io.ReadCloser) io.ReadCloser
+ NewFrameWriter(w io.Writer) io.Writer
+}
+
+// SerializerInfo contains information about a specific serialization format
+type SerializerInfo struct {
+ Serializer
+ // EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
+ EncodesAsText bool
+ // MediaType is the value that represents this serializer over the wire.
+ MediaType string
+}
+
+// StreamSerializerInfo contains information about a specific stream serialization format
+type StreamSerializerInfo struct {
+ SerializerInfo
+ // Framer is the factory for retrieving streams that separate objects on the wire
+ Framer
+ // Embedded is the type of the nested serialization that should be used.
+ Embedded SerializerInfo
+}
+
+// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers
+// for multiple supported media types. This would commonly be accepted by a server component
+// that performs HTTP content negotiation to accept multiple formats.
+type NegotiatedSerializer interface {
+ // SupportedMediaTypes is the media types supported for reading and writing single objects.
+ SupportedMediaTypes() []string
+ // SerializerForMediaType returns a serializer for the provided media type. params is the set of
+ // parameters applied to the media type that may modify the resulting output. ok will be false
+ // if no serializer matched the media type.
+ SerializerForMediaType(mediaType string, params map[string]string) (s SerializerInfo, ok bool)
+
+ // SupportedStreamingMediaTypes returns the media types of the supported streaming serializers.
+ // Streaming serializers control how multiple objects are written to a stream output.
+ SupportedStreamingMediaTypes() []string
+ // StreamingSerializerForMediaType returns a serializer for the provided media type that supports
+ // reading and writing multiple objects to a stream. It returns a framer and serializer, or an
+ // error if no such serializer can be created. Params is the set of parameters applied to the
+ // media type that may modify the resulting output. ok will be false if no serializer matched
+ // the media type.
+ StreamingSerializerForMediaType(mediaType string, params map[string]string) (s StreamSerializerInfo, ok bool)
+
+ // EncoderForVersion returns an encoder that ensures objects being written to the provided
+ // serializer are in the provided group version.
+ // TODO: take multiple group versions
+ EncoderForVersion(serializer Encoder, gv unversioned.GroupVersion) Encoder
+ // DecoderForVersion returns a decoder that ensures objects being read by the provided
+ // serializer are in the provided group version by default.
+ // TODO: take multiple group versions
+ DecoderToVersion(serializer Decoder, gv unversioned.GroupVersion) Decoder
+}
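A hedged sketch of how a server component might drive this interface during content negotiation (encodeAs is a made-up helper, and fmt would additionally need to be imported):

func encodeAs(ns NegotiatedSerializer, mediaType string, gv unversioned.GroupVersion, obj Object, w io.Writer) error {
	info, ok := ns.SerializerForMediaType(mediaType, nil)
	if !ok {
		return fmt.Errorf("media type %q is not supported", mediaType)
	}
	// Wrap the negotiated serializer so the object is written in the requested group version.
	return ns.EncoderForVersion(info.Serializer, gv).Encode(obj, w)
}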
+
+// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers
+// that can read and write data at rest. This would commonly be used by client tools that must
+// read files, or server side storage interfaces that persist restful objects.
+type StorageSerializer interface {
+ // SerializerForMediaType returns a serializer for the provided media type. Options is a set of
+ // parameters applied to the media type that may modify the resulting output.
+ SerializerForMediaType(mediaType string, options map[string]string) (SerializerInfo, bool)
+
+ // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats
+ // by introspecting the data at rest.
+ UniversalDeserializer() Decoder
+
+ // EncoderForVersion returns an encoder that ensures objects being written to the provided
+ // serializer are in the provided group version.
+ // TODO: take multiple group versions
+ EncoderForVersion(serializer Encoder, gv unversioned.GroupVersion) Encoder
+ // DecoderForVersion returns a decoder that ensures objects being read by the provided
+ // serializer are in the provided group version by default.
+ // TODO: take multiple group versions
+ DecoderToVersion(serializer Decoder, gv unversioned.GroupVersion) Decoder
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Non-codec interfaces
+
+type ObjectVersioner interface {
+ ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (out Object, err error)
+}
+
+// ObjectConvertor converts an object to a different version.
+type ObjectConvertor interface {
+ // Convert attempts to convert one object into another, or returns an error. This method does
+ // not guarantee the in object is not mutated.
+ Convert(in, out interface{}) error
+ // ConvertToVersion takes the provided object and converts it to the provided version. This
+ // method does not guarantee that the in object is not mutated.
+ ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (out Object, err error)
+ ConvertFieldLabel(version, kind, label, value string) (string, string, error)
+}
+
+// ObjectTyper contains methods for extracting the APIVersion and Kind
+// of objects.
+type ObjectTyper interface {
+ // ObjectKinds returns all possible group,version,kind of the provided object, true if
+ // the object is unversioned, or an error if the object is not recognized
+ // (IsNotRegisteredError will return true).
+ ObjectKinds(Object) ([]unversioned.GroupVersionKind, bool, error)
+ // Recognizes returns true if the scheme is able to handle the provided version and kind,
+ // or more precisely that the provided version is a possible conversion or decoding
+ // target.
+ Recognizes(gvk unversioned.GroupVersionKind) bool
+}
+
+// ObjectCreater contains methods for instantiating an object by kind and version.
+type ObjectCreater interface {
+ New(kind unversioned.GroupVersionKind) (out Object, err error)
+}
+
+// ObjectCopier duplicates an object.
+type ObjectCopier interface {
+ // Copy returns an exact copy of the provided Object, or an error if the
+ // copy could not be completed.
+ Copy(Object) (Object, error)
+}
+
+// ResourceVersioner provides methods for setting and retrieving
+// the resource version from an API object.
+type ResourceVersioner interface {
+ SetResourceVersion(obj Object, version string) error
+ ResourceVersion(obj Object) (string, error)
+}
+
+// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object.
+type SelfLinker interface {
+ SetSelfLink(obj Object, selfLink string) error
+ SelfLink(obj Object) (string, error)
+
+ // Knowing Name is sometimes necessary to use a SelfLinker.
+ Name(obj Object) (string, error)
+ // Knowing Namespace is sometimes necessary to use a SelfLinker
+ Namespace(obj Object) (string, error)
+}
+
+// All API types registered with Scheme must support the Object interface. Since objects in a scheme are
+// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows
+// serializers to set the kind, version, and group the object is represented as. An Object may choose
+// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized.
+type Object interface {
+ GetObjectKind() unversioned.ObjectKind
+}
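For reference, a minimal type can satisfy Object by embedding TypeMeta (defined elsewhere in this package) and handing it back as its ObjectKind. The Carp type below is a made-up example, not part of the vendored code:

type Carp struct {
	TypeMeta `json:",inline"`
	Name     string `json:"name"`
}

// GetObjectKind satisfies Object; TypeMeta implements unversioned.ObjectKind.
func (c *Carp) GetObjectKind() unversioned.ObjectKind { return &c.TypeMeta }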
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/register.go
new file mode 100644
index 0000000..39a1eb1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/register.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+ obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) GroupVersionKind() unversioned.GroupVersionKind {
+ return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *Unknown) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+
+func (obj *Unstructured) GetObjectKind() unversioned.ObjectKind { return obj }
+func (obj *UnstructuredList) GetObjectKind() unversioned.ObjectKind { return obj }
+
+// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind
+// interface if no objects are provided, or the ObjectKind interface of the object in the
+// highest array position.
+func (obj *VersionedObjects) GetObjectKind() unversioned.ObjectKind {
+ last := obj.Last()
+ if last == nil {
+ return unversioned.EmptyObjectKind
+ }
+ return last.GetObjectKind()
+}
+
+// First returns the leftmost object in the VersionedObjects array, which is usually the
+// object as serialized on the wire.
+func (obj *VersionedObjects) First() Object {
+ if len(obj.Objects) == 0 {
+ return nil
+ }
+ return obj.Objects[0]
+}
+
+// Last is the rightmost object in the VersionedObjects array, which is the object after
+// all transformations have been applied. This is the same object that would be returned
+// by Decode in a normal invocation (without VersionedObjects in the into argument).
+func (obj *VersionedObjects) Last() Object {
+ if len(obj.Objects) == 0 {
+ return nil
+ }
+ return obj.Objects[len(obj.Objects)-1]
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go
new file mode 100644
index 0000000..f98ec54
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go
@@ -0,0 +1,623 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+)
+
+// Scheme defines methods for serializing and deserializing API objects, a type
+// registry for converting group, version, and kind information to and from Go
+// schemas, and mappings between Go schemas of different versions. A scheme is the
+// foundation for a versioned API and versioned configuration over time.
+//
+// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time
+// identifier for a particular representation of that Type (typically backwards
+// compatible), a Kind is the unique name for that Type within the Version, and a
+// Group identifies a set of Versions, Kinds, and Types that evolve over time. An
+// Unversioned Type is one that is not yet formally bound to a version and is promised
+// to be backwards compatible (effectively a "v1" of a Type that does not expect
+// to break in the future).
+//
+// Schemes are not expected to change at runtime and are only threadsafe after
+// registration is complete.
+type Scheme struct {
+ // versionMap allows one to figure out the go type of an object with
+ // the given version and name.
+ gvkToType map[unversioned.GroupVersionKind]reflect.Type
+
+ // typeToGroupVersion allows one to find metadata for a given go object.
+ // The reflect.Type we index by should *not* be a pointer.
+ typeToGVK map[reflect.Type][]unversioned.GroupVersionKind
+
+ // unversionedTypes are transformed without conversion in ConvertToVersion.
+ unversionedTypes map[reflect.Type]unversioned.GroupVersionKind
+
+ // unversionedKinds are the names of kinds that can be created in the context of any group
+ // or version
+ // TODO: resolve the status of unversioned types.
+ unversionedKinds map[string]reflect.Type
+
+ // Map from version and resource to the corresponding func to convert
+ // resource field labels in that version to internal version.
+ fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc
+
+ // converter stores all registered conversion functions. It also has
+ // default converting behavior.
+ converter *conversion.Converter
+
+ // cloner stores all registered copy functions. It also has default
+ // deep copy behavior.
+ cloner *conversion.Cloner
+}
+
+// Function to convert a field selector to internal representation.
+type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error)
+
+// NewScheme creates a new Scheme. This scheme is pluggable by default.
+func NewScheme() *Scheme {
+ s := &Scheme{
+ gvkToType: map[unversioned.GroupVersionKind]reflect.Type{},
+ typeToGVK: map[reflect.Type][]unversioned.GroupVersionKind{},
+ unversionedTypes: map[reflect.Type]unversioned.GroupVersionKind{},
+ unversionedKinds: map[string]reflect.Type{},
+ cloner: conversion.NewCloner(),
+ fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{},
+ }
+ s.converter = conversion.NewConverter(s.nameFunc)
+
+ s.AddConversionFuncs(DefaultEmbeddedConversions()...)
+
+ // Enable map[string][]string conversions by default
+ if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil {
+ panic(err)
+ }
+ if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
+ panic(err)
+ }
+ if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// nameFunc returns the name of the type that we wish to use to determine when two types attempt
+// a conversion. Defaults to the go name of the type if the type is not registered.
+func (s *Scheme) nameFunc(t reflect.Type) string {
+ // find the preferred names for this type
+ gvks, ok := s.typeToGVK[t]
+ if !ok {
+ return t.Name()
+ }
+
+ for _, gvk := range gvks {
+ internalGV := gvk.GroupVersion()
+ internalGV.Version = "__internal" // this is hacky and maybe should be passed in
+ internalGVK := internalGV.WithKind(gvk.Kind)
+
+ if internalType, exists := s.gvkToType[internalGVK]; exists {
+ return s.typeToGVK[internalType][0].Kind
+ }
+ }
+
+ return gvks[0].Kind
+}
+
+// fromScope gets the input version, desired output version, and desired Scheme
+// from a conversion.Scope.
+func (s *Scheme) fromScope(scope conversion.Scope) *Scheme {
+ return s
+}
+
+// Converter allows access to the converter for the scheme
+func (s *Scheme) Converter() *conversion.Converter {
+ return s.converter
+}
+
+// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules.
+// Whenever an object of this type is serialized, it is serialized with the provided group version and is not
+// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an
+// API group and version that would never be updated.
+//
+// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
+// every version with particular schemas. Resolve this method at that point.
+func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ...Object) {
+ s.AddKnownTypes(version, types...)
+ for _, obj := range types {
+ t := reflect.TypeOf(obj).Elem()
+ gvk := version.WithKind(t.Name())
+ s.unversionedTypes[t] = gvk
+ if _, ok := s.unversionedKinds[gvk.Kind]; ok {
+ panic(fmt.Sprintf("%v has already been registered as unversioned kind %q - kind name must be unique", reflect.TypeOf(t), gvk.Kind))
+ }
+ s.unversionedKinds[gvk.Kind] = t
+ }
+}
+
+// AddKnownTypes registers all types passed in 'types' as being members of version 'version'.
+// All objects passed to types should be pointers to structs. The name that go reports for
+// the struct becomes the "kind" field when encoding. Version may not be empty - use the
+// APIVersionInternal constant if you have a type that does not have a formal version.
+func (s *Scheme) AddKnownTypes(gv unversioned.GroupVersion, types ...Object) {
+ if len(gv.Version) == 0 {
+ panic(fmt.Sprintf("version is required on all types: %s %v", gv, types[0]))
+ }
+ for _, obj := range types {
+ t := reflect.TypeOf(obj)
+ if t.Kind() != reflect.Ptr {
+ panic("All types must be pointers to structs.")
+ }
+ t = t.Elem()
+ if t.Kind() != reflect.Struct {
+ panic("All types must be pointers to structs.")
+ }
+
+ gvk := gv.WithKind(t.Name())
+ s.gvkToType[gvk] = t
+ s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
+ }
+}
+
+// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should
+// be encoded as. Useful for testing when you don't want to make multiple packages to define
+// your structs. Version may not be empty - use the APIVersionInternal constant if you have a
+// type that does not have a formal version.
+func (s *Scheme) AddKnownTypeWithName(gvk unversioned.GroupVersionKind, obj Object) {
+ t := reflect.TypeOf(obj)
+ if len(gvk.Version) == 0 {
+ panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t))
+ }
+ if t.Kind() != reflect.Ptr {
+ panic("All types must be pointers to structs.")
+ }
+ t = t.Elem()
+ if t.Kind() != reflect.Struct {
+ panic("All types must be pointers to structs.")
+ }
+
+ s.gvkToType[gvk] = t
+ s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
+}
+
+// KnownTypes returns the types known for the given version.
+func (s *Scheme) KnownTypes(gv unversioned.GroupVersion) map[string]reflect.Type {
+ types := make(map[string]reflect.Type)
+ for gvk, t := range s.gvkToType {
+ if gv != gvk.GroupVersion() {
+ continue
+ }
+
+ types[gvk.Kind] = t
+ }
+ return types
+}
+
+// ObjectKind returns the group,version,kind of the go object and true if this object
+// is considered unversioned, or an error if it's not a pointer or is unregistered.
+func (s *Scheme) ObjectKind(obj Object) (unversioned.GroupVersionKind, bool, error) {
+ gvks, unversionedType, err := s.ObjectKinds(obj)
+ if err != nil {
+ return unversioned.GroupVersionKind{}, false, err
+ }
+ return gvks[0], unversionedType, nil
+}
+
+// ObjectKinds returns all possible group,version,kind of the go object, true if the
+// object is considered unversioned, or an error if it's not a pointer or is unregistered.
+func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, bool, error) {
+ v, err := conversion.EnforcePtr(obj)
+ if err != nil {
+ return nil, false, err
+ }
+ t := v.Type()
+
+ gvks, ok := s.typeToGVK[t]
+ if !ok {
+ return nil, false, &notRegisteredErr{t: t}
+ }
+ _, unversionedType := s.unversionedTypes[t]
+
+ return gvks, unversionedType, nil
+}
+
+// Recognizes returns true if the scheme is able to handle the provided group,version,kind
+// of an object.
+func (s *Scheme) Recognizes(gvk unversioned.GroupVersionKind) bool {
+ _, exists := s.gvkToType[gvk]
+ return exists
+}
+
+func (s *Scheme) IsUnversioned(obj Object) (bool, bool) {
+ v, err := conversion.EnforcePtr(obj)
+ if err != nil {
+ return false, false
+ }
+ t := v.Type()
+
+ if _, ok := s.typeToGVK[t]; !ok {
+ return false, false
+ }
+ _, ok := s.unversionedTypes[t]
+ return ok, true
+}
+
+// New returns a new API object of the given version and name, or an error if it hasn't
+// been registered. The version and kind fields must be specified.
+func (s *Scheme) New(kind unversioned.GroupVersionKind) (Object, error) {
+ if t, exists := s.gvkToType[kind]; exists {
+ return reflect.New(t).Interface().(Object), nil
+ }
+
+ if t, exists := s.unversionedKinds[kind.Kind]; exists {
+ return reflect.New(t).Interface().(Object), nil
+ }
+ return nil, &notRegisteredErr{gvk: kind}
+}
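Putting AddKnownTypes and New together, a hedged sketch of registering a kind and instantiating it by GroupVersionKind (the group/version values and the Carp type from the earlier sketch are assumptions):

func newCarpByKind() (Object, error) {
	s := NewScheme()
	gv := unversioned.GroupVersion{Group: "example.k8s.io", Version: "v1"}
	s.AddKnownTypes(gv, &Carp{})      // the Go type name "Carp" becomes the kind
	return s.New(gv.WithKind("Carp")) // a freshly allocated, zero-valued *Carp
}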
+
+// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern
+// (for two conversion types) to the converter. These functions are checked first during
+// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering
+// typed conversions.
+func (s *Scheme) AddGenericConversionFunc(fn conversion.GenericConversionFunc) {
+ s.converter.AddGenericConversionFunc(fn)
+}
+
+// Log sets a logger on the scheme. For test purposes only
+func (s *Scheme) Log(l conversion.DebugLogger) {
+ s.converter.Debug = l
+}
+
+// AddIgnoredConversionType identifies a pair of types that should be skipped by
+// conversion (because the data inside them is explicitly dropped during
+// conversion).
+func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error {
+ return s.converter.RegisterIgnoredConversion(from, to)
+}
+
+// AddConversionFuncs adds functions to the list of conversion functions. The given
+// functions should know how to convert between two of your API objects, or their
+// sub-objects. We deduce how to call these functions from the types of their two
+// parameters; see the comment for Converter.Register.
+//
+// Note that, if you need to copy sub-objects that didn't change, you can use the
+// conversion.Scope object that will be passed to your conversion function.
+// Additionally, all conversions started by Scheme will set the SrcVersion and
+// DestVersion fields on the Meta object. Example:
+//
+// s.AddConversionFuncs(
+// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
+// // You can depend on Meta() being non-nil, and this being set to
+// // the source version, e.g., ""
+// s.Meta().SrcVersion
+// // You can depend on this being set to the destination version,
+// // e.g., "v1".
+// s.Meta().DestVersion
+// // Call scope.Convert to copy sub-fields.
+// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
+// return nil
+// },
+// )
+//
+// (For more detail about conversion functions, see Converter.Register's comment.)
+//
+// Also note that the default behavior, if you don't add a conversion function, is to
+// sanely copy fields that have the same names and same type names. It's OK if the
+// destination type has extra fields, but it must not remove any. So you only need to
+// add conversion functions for things with changed/removed fields.
+func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
+ for _, f := range conversionFuncs {
+ if err := s.converter.RegisterConversionFunc(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Similar to AddConversionFuncs, but registers conversion functions that were
+// automatically generated.
+func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error {
+ for _, f := range conversionFuncs {
+ if err := s.converter.RegisterGeneratedConversionFunc(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddDeepCopyFuncs adds a function to the list of deep-copy functions.
+// For the expected format of deep-copy function, see the comment for
+// Copier.RegisterDeepCopyFunction.
+func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error {
+ for _, f := range deepCopyFuncs {
+ if err := s.cloner.RegisterDeepCopyFunc(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were
+// automatically generated.
+func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...interface{}) error {
+ for _, f := range deepCopyFuncs {
+ if err := s.cloner.RegisterGeneratedDeepCopyFunc(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddFieldLabelConversionFunc adds a conversion function to convert field selectors
+// of the given kind from the given version to internal version representation.
+func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error {
+ if s.fieldLabelConversionFuncs[version] == nil {
+ s.fieldLabelConversionFuncs[version] = map[string]FieldLabelConversionFunc{}
+ }
+
+ s.fieldLabelConversionFuncs[version][kind] = conversionFunc
+ return nil
+}
+
+// AddStructFieldConversion allows you to specify a mechanical copy for a moved
+// or renamed struct field without writing an entire conversion function. See
+// the comment in conversion.Converter.SetStructFieldCopy for parameter details.
+// Call as many times as needed, even on the same fields.
+func (s *Scheme) AddStructFieldConversion(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error {
+ return s.converter.SetStructFieldCopy(srcFieldType, srcFieldName, destFieldType, destFieldName)
+}
+
+// RegisterInputDefaults sets the provided field mapping function and field matching
+// as the defaults for the provided input type. The fn may be nil, in which case no
+// mapping will happen by default. Use this method to register a mechanism for handling
+// a specific input type in conversion, such as a map[string]string to structs.
+func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error {
+ return s.converter.RegisterInputDefaults(in, fn, defaultFlags)
+}
+
+// AddDefaultingFuncs adds functions to the list of default-value functions.
+// Each of the given functions is responsible for applying default values
+// when converting an instance of a versioned API object into an internal
+// API object. These functions do not need to handle sub-objects. We deduce
+// how to call these functions from the types of their two parameters.
+//
+// s.AddDefaultingFuncs(
+// func(obj *v1.Pod) {
+// if obj.OptionalField == "" {
+// obj.OptionalField = "DefaultValue"
+// }
+// },
+// )
+func (s *Scheme) AddDefaultingFuncs(defaultingFuncs ...interface{}) error {
+ for _, f := range defaultingFuncs {
+ err := s.converter.RegisterDefaultingFunc(f)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Copy does a deep copy of an API object.
+func (s *Scheme) Copy(src Object) (Object, error) {
+ dst, err := s.DeepCopy(src)
+ if err != nil {
+ return nil, err
+ }
+ return dst.(Object), nil
+}
+
+// DeepCopy performs a deep copy of the given object.
+func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) {
+ return s.cloner.DeepCopy(src)
+}
+
+// Convert will attempt to convert in into out. Both must be pointers. For easy
+// testing of conversion functions. Returns an error if the conversion isn't
+// possible. You can call this with types that haven't been registered (for example,
+// to test conversion of types that are nested within registered types), but in
+// that case, the conversion.Scope object passed to your conversion functions won't
+// have SrcVersion or DestVersion fields set correctly in Meta().
+func (s *Scheme) Convert(in, out interface{}) error {
+ inVersion := unversioned.GroupVersion{Group: "unknown", Version: "unknown"}
+ outVersion := unversioned.GroupVersion{Group: "unknown", Version: "unknown"}
+ if inObj, ok := in.(Object); ok {
+ if gvks, _, err := s.ObjectKinds(inObj); err == nil {
+ inVersion = gvks[0].GroupVersion()
+ }
+ }
+ if outObj, ok := out.(Object); ok {
+ if gvks, _, err := s.ObjectKinds(outObj); err == nil {
+ outVersion = gvks[0].GroupVersion()
+ }
+ }
+ flags, meta := s.generateConvertMeta(inVersion, outVersion, in)
+ if flags == 0 {
+ flags = conversion.AllowDifferentFieldTypeNames
+ }
+ return s.converter.Convert(in, out, flags, meta)
+}
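A hedged sketch of Convert in use; CarpV1 and CarpInternal are hypothetical versioned and internal counterparts with matching field names, so either a registered conversion func or the default field-by-field copy applies:

func toInternal(s *Scheme, in *CarpV1) (*CarpInternal, error) {
	out := &CarpInternal{}
	if err := s.Convert(in, out); err != nil {
		return nil, err
	}
	return out, nil
}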
+
+// ConvertFieldLabel converts the given field label and value for a kind field selector
+// from a versioned representation to an unversioned one.
+func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
+ if s.fieldLabelConversionFuncs[version] == nil {
+ return "", "", fmt.Errorf("No field label conversion function found for version: %s", version)
+ }
+ conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind]
+ if !ok {
+ return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind)
+ }
+ return conversionFunc(label, value)
+}
+
+// ConvertToVersion attempts to convert an input object to its matching Kind in another
+// version within this scheme. Will return an error if the provided version does not
+// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also
+// return an error if the conversion does not result in a valid Object being
+// returned. The serializer handles loading/serializing nested objects.
+func (s *Scheme) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) {
+ switch in.(type) {
+ case *Unknown, *Unstructured, *UnstructuredList:
+ old := in.GetObjectKind().GroupVersionKind()
+ defer in.GetObjectKind().SetGroupVersionKind(old)
+ setTargetVersion(in, s, outVersion)
+ return in, nil
+ }
+ t := reflect.TypeOf(in)
+ if t.Kind() != reflect.Ptr {
+ return nil, fmt.Errorf("only pointer types may be converted: %v", t)
+ }
+
+ t = t.Elem()
+ if t.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t)
+ }
+
+ var kind unversioned.GroupVersionKind
+ if unversionedKind, ok := s.unversionedTypes[t]; ok {
+ kind = unversionedKind
+ } else {
+ kinds, ok := s.typeToGVK[t]
+ if !ok || len(kinds) == 0 {
+ return nil, fmt.Errorf("%v is not a registered type and cannot be converted into version %q", t, outVersion)
+ }
+ kind = kinds[0]
+ }
+
+ outKind := outVersion.WithKind(kind.Kind)
+
+ inKinds, _, err := s.ObjectKinds(in)
+ if err != nil {
+ return nil, err
+ }
+
+ out, err := s.New(outKind)
+ if err != nil {
+ return nil, err
+ }
+
+ flags, meta := s.generateConvertMeta(inKinds[0].GroupVersion(), outVersion, in)
+ if err := s.converter.Convert(in, out, flags, meta); err != nil {
+ return nil, err
+ }
+
+ setTargetVersion(out, s, outVersion)
+ return out, nil
+}
+
+// UnsafeConvertToVersion will convert in to the provided outVersion if such a conversion is possible,
+// but does not guarantee the output object does not share fields with the input object. It attempts to be as
+// efficient as possible when doing conversion.
+func (s *Scheme) UnsafeConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) {
+ switch t := in.(type) {
+ case *Unknown:
+ t.APIVersion = outVersion.String()
+ return t, nil
+ case *Unstructured:
+ t.SetAPIVersion(outVersion.String())
+ return t, nil
+ case *UnstructuredList:
+ t.SetAPIVersion(outVersion.String())
+ return t, nil
+ }
+
+ // determine the incoming kinds with as few allocations as possible.
+ t := reflect.TypeOf(in)
+ if t.Kind() != reflect.Ptr {
+ return nil, fmt.Errorf("only pointer types may be converted: %v", t)
+ }
+ t = t.Elem()
+ if t.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t)
+ }
+ kinds, ok := s.typeToGVK[t]
+ if !ok || len(kinds) == 0 {
+ return nil, fmt.Errorf("%v is not a registered type and cannot be converted into version %q", t, outVersion)
+ }
+
+ // if the Go type is also registered to the destination kind, no conversion is necessary
+ for i := range kinds {
+ if kinds[i].Version == outVersion.Version && kinds[i].Group == outVersion.Group {
+ setTargetKind(in, kinds[i])
+ return in, nil
+ }
+ }
+
+ // type is unversioned, no conversion necessary
+ // it should be possible to avoid this allocation
+ if unversionedKind, ok := s.unversionedTypes[t]; ok {
+ kind := unversionedKind
+ outKind := outVersion.WithKind(kind.Kind)
+ setTargetKind(in, outKind)
+ return in, nil
+ }
+
+ // allocate a new object as the target using the target kind
+ // TODO: this should look in the target group version and find the first kind that matches, rather than the
+ // first kind registered in typeToGVK
+ kind := kinds[0]
+ kind.Version = outVersion.Version
+ kind.Group = outVersion.Group
+ out, err := s.New(kind)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: try to avoid the allocations here - in fast paths we are not likely to need these flags or meta
+ flags, meta := s.converter.DefaultMeta(t)
+ if err := s.converter.Convert(in, out, flags, meta); err != nil {
+ return nil, err
+ }
+
+ setTargetKind(out, kind)
+ return out, nil
+}
+
+// generateConvertMeta constructs the meta value we pass to Convert.
+func (s *Scheme) generateConvertMeta(srcGroupVersion, destGroupVersion unversioned.GroupVersion, in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) {
+ return s.converter.DefaultMeta(reflect.TypeOf(in))
+}
+
+// setTargetVersion is deprecated and should be replaced by use of setTargetKind
+func setTargetVersion(obj Object, raw *Scheme, gv unversioned.GroupVersion) {
+ if gv.Version == APIVersionInternal {
+ // internal is a special case
+ obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{})
+ return
+ }
+ if gvks, _, _ := raw.ObjectKinds(obj); len(gvks) > 0 {
+ obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: gvks[0].Kind})
+ } else {
+ obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version})
+ }
+}
+
+// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version.
+func setTargetKind(obj Object, kind unversioned.GroupVersionKind) {
+ if kind.Version == APIVersionInternal {
+ // internal is a special case
+ // TODO: look at removing the need to special case this
+ obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{})
+ return
+ }
+ obj.GetObjectKind().SetGroupVersionKind(kind)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go
new file mode 100644
index 0000000..758aa6b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go
@@ -0,0 +1,364 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+ "io"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer/json"
+ "k8s.io/kubernetes/pkg/runtime/serializer/recognizer"
+ "k8s.io/kubernetes/pkg/runtime/serializer/versioning"
+)
+
+// serializerExtensions are for serializers that are conditionally compiled in
+var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
+
+type serializerType struct {
+ AcceptContentTypes []string
+ ContentType string
+ FileExtensions []string
+ // EncodesAsText should be true if this content type can be represented safely in UTF-8
+ EncodesAsText bool
+
+ Serializer runtime.Serializer
+ PrettySerializer runtime.Serializer
+	// RawSerializer serializes an object without adding a type wrapper. Some serializers, like JSON,
+	// automatically include identifying type information with the JSON. Others, like Protobuf, need
+	// a wrapper object that includes type information. This serializer should be set if the serializer
+	// can serialize / deserialize objects without type info. Note that callers will always be expected
+	// to pass an into object or a gvk to Decode, since no type information will be available on
+	// the object itself.
+ RawSerializer runtime.Serializer
+ // Specialize gives the type the opportunity to return a different serializer implementation if
+ // the content type contains alternate operations. Here it is used to implement "pretty" as an
+ // option to application/json, but could also be used to allow serializers to perform type
+ // defaulting or alter output.
+ Specialize func(map[string]string) (runtime.Serializer, bool)
+
+ AcceptStreamContentTypes []string
+ StreamContentType string
+
+ Framer runtime.Framer
+ StreamSerializer runtime.Serializer
+ StreamSpecialize func(map[string]string) (runtime.Serializer, bool)
+}
+
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType {
+ jsonSerializer := json.NewSerializer(mf, scheme, scheme, false)
+ jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true)
+ yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme)
+
+ serializers := []serializerType{
+ {
+ AcceptContentTypes: []string{"application/json"},
+ ContentType: "application/json",
+ FileExtensions: []string{"json"},
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ PrettySerializer: jsonPrettySerializer,
+
+ AcceptStreamContentTypes: []string{"application/json", "application/json;stream=watch"},
+ StreamContentType: "application/json",
+ Framer: json.Framer,
+ StreamSerializer: jsonSerializer,
+ },
+ {
+ AcceptContentTypes: []string{"application/yaml"},
+ ContentType: "application/yaml",
+ FileExtensions: []string{"yaml"},
+ EncodesAsText: true,
+ Serializer: yamlSerializer,
+
+ // TODO: requires runtime.RawExtension to properly distinguish when the nested content is
+ // yaml, because the yaml encoder invokes MarshalJSON first
+ //AcceptStreamContentTypes: []string{"application/yaml", "application/yaml;stream=watch"},
+ //StreamContentType: "application/yaml;stream=watch",
+ //Framer: json.YAMLFramer,
+ //StreamSerializer: yamlSerializer,
+ },
+ }
+
+ for _, fn := range serializerExtensions {
+ if serializer, ok := fn(scheme); ok {
+ serializers = append(serializers, serializer)
+ }
+ }
+ return serializers
+}
+
+// CodecFactory provides methods for retrieving codecs and serializers for specific
+// versions and content types.
+type CodecFactory struct {
+ scheme *runtime.Scheme
+ serializers []serializerType
+ universal runtime.Decoder
+ accepts []string
+ streamingAccepts []string
+
+ legacySerializer runtime.Serializer
+}
+
+// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
+// and conversion wrappers to define preferred internal and external versions. In the future,
+// as the internal version is used less, callers may instead use a defaulting serializer and
+// only convert objects which are shared internally (Status, common API machinery).
+// TODO: allow other codecs to be compiled in?
+// TODO: accept a scheme interface
+func NewCodecFactory(scheme *runtime.Scheme) CodecFactory {
+ serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory)
+ return newCodecFactory(scheme, serializers)
+}
+
+// newCodecFactory is a helper for testing that allows a different set of serializers to be specified.
+func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+ decoders := make([]runtime.Decoder, 0, len(serializers))
+ accepts := []string{}
+ alreadyAccepted := make(map[string]struct{})
+
+ var legacySerializer runtime.Serializer
+ for _, d := range serializers {
+ decoders = append(decoders, d.Serializer)
+ for _, mediaType := range d.AcceptContentTypes {
+ if _, ok := alreadyAccepted[mediaType]; ok {
+ continue
+ }
+ alreadyAccepted[mediaType] = struct{}{}
+ accepts = append(accepts, mediaType)
+ if mediaType == "application/json" {
+ legacySerializer = d.Serializer
+ }
+ }
+ }
+ if legacySerializer == nil {
+ legacySerializer = serializers[0].Serializer
+ }
+
+ streamAccepts := []string{}
+ alreadyAccepted = make(map[string]struct{})
+ for _, d := range serializers {
+ if len(d.StreamContentType) == 0 {
+ continue
+ }
+ for _, mediaType := range d.AcceptStreamContentTypes {
+ if _, ok := alreadyAccepted[mediaType]; ok {
+ continue
+ }
+ alreadyAccepted[mediaType] = struct{}{}
+ streamAccepts = append(streamAccepts, mediaType)
+ }
+ }
+
+ return CodecFactory{
+ scheme: scheme,
+ serializers: serializers,
+ universal: recognizer.NewDecoder(decoders...),
+
+ accepts: accepts,
+ streamingAccepts: streamAccepts,
+
+ legacySerializer: legacySerializer,
+ }
+}
+
+var _ runtime.NegotiatedSerializer = &CodecFactory{}
+
+// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
+func (f CodecFactory) SupportedMediaTypes() []string {
+ return f.accepts
+}
+
+// SupportedStreamingMediaTypes returns the RFC2046 media types that this factory has stream serializers for.
+func (f CodecFactory) SupportedStreamingMediaTypes() []string {
+ return f.streamingAccepts
+}
+
+// LegacyCodec encodes output to a given API version, and decodes output into the internal form from
+// any recognized source. The returned codec will always encode output to JSON.
+//
+// This method is deprecated - clients and servers should negotiate a serializer by mime-type and
+// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
+func (f CodecFactory) LegacyCodec(version ...unversioned.GroupVersion) runtime.Codec {
+ return versioning.NewCodecForScheme(f.scheme, f.legacySerializer, f.universal, version, nil)
+}
+
+// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies
+// runtime.Object. It does not perform conversion. It does not perform defaulting.
+func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
+ return f.universal
+}
+
+// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used
+// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes
+// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate
+// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified,
+// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs
+// defaulting.
+//
+// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
+func (f CodecFactory) UniversalDecoder(versions ...unversioned.GroupVersion) runtime.Decoder {
+ return f.CodecForVersions(nil, f.universal, nil, versions)
+}
+
+// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list,
+// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not
+// converted. If encode or decode are nil, no conversion is performed.
+func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode []unversioned.GroupVersion, decode []unversioned.GroupVersion) runtime.Codec {
+ return versioning.NewCodecForScheme(f.scheme, encoder, decoder, encode, decode)
+}
+
+// DecoderToVersion returns a decoder that targets the provided group version.
+func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder {
+ return f.CodecForVersions(nil, decoder, nil, []unversioned.GroupVersion{gv})
+}
+
+// EncoderForVersion returns an encoder that targets the provided group version.
+func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder {
+ return f.CodecForVersions(encoder, nil, []unversioned.GroupVersion{gv}, nil)
+}
+
+// SerializerForMediaType returns a serializer that matches the provided RFC2046 mediaType, or false if no such
+// serializer exists.
+func (f CodecFactory) SerializerForMediaType(mediaType string, params map[string]string) (runtime.SerializerInfo, bool) {
+ for _, s := range f.serializers {
+ for _, accepted := range s.AcceptContentTypes {
+ if accepted == mediaType {
+ // specialization abstracts variants to the content type
+ if s.Specialize != nil && len(params) > 0 {
+ serializer, ok := s.Specialize(params)
+ // TODO: return formatted mediaType+params
+ return runtime.SerializerInfo{Serializer: serializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, ok
+ }
+
+ // legacy support for ?pretty=1 continues, but this is more formally defined
+ if v, ok := params["pretty"]; ok && v == "1" && s.PrettySerializer != nil {
+ return runtime.SerializerInfo{Serializer: s.PrettySerializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, true
+ }
+
+ // return the base variant
+ return runtime.SerializerInfo{Serializer: s.Serializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, true
+ }
+ }
+ }
+ return runtime.SerializerInfo{}, false
+}
+
+// StreamingSerializerForMediaType returns a streaming serializer that matches the provided RFC2046 mediaType, or
+// false if no such serializer exists.
+func (f CodecFactory) StreamingSerializerForMediaType(mediaType string, params map[string]string) (runtime.StreamSerializerInfo, bool) {
+ for _, s := range f.serializers {
+ for _, accepted := range s.AcceptStreamContentTypes {
+ if accepted == mediaType {
+ // TODO: accept params
+ nested, ok := f.SerializerForMediaType(s.ContentType, nil)
+ if !ok {
+ panic("no serializer defined for internal content type")
+ }
+
+ if s.StreamSpecialize != nil && len(params) > 0 {
+ serializer, ok := s.StreamSpecialize(params)
+ // TODO: return formatted mediaType+params
+ return runtime.StreamSerializerInfo{
+ SerializerInfo: runtime.SerializerInfo{
+ Serializer: serializer,
+ MediaType: s.StreamContentType,
+ EncodesAsText: s.EncodesAsText,
+ },
+ Framer: s.Framer,
+ Embedded: nested,
+ }, ok
+ }
+
+ return runtime.StreamSerializerInfo{
+ SerializerInfo: runtime.SerializerInfo{
+ Serializer: s.StreamSerializer,
+ MediaType: s.StreamContentType,
+ EncodesAsText: s.EncodesAsText,
+ },
+ Framer: s.Framer,
+ Embedded: nested,
+ }, true
+ }
+ }
+ }
+ return runtime.StreamSerializerInfo{}, false
+}
+
+// SerializerForFileExtension returns a serializer for the provided extension, or false if no serializer matches.
+func (f CodecFactory) SerializerForFileExtension(extension string) (runtime.Serializer, bool) {
+ for _, s := range f.serializers {
+ for _, ext := range s.FileExtensions {
+ if extension == ext {
+ return s.Serializer, true
+ }
+ }
+ }
+ return nil, false
+}
+
+// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion.
+type DirectCodecFactory struct {
+ CodecFactory
+}
+
+// EncoderForVersion returns an encoder that does not do conversion. gv is ignored.
+func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder {
+ return DirectCodec{
+ runtime.NewCodec(serializer, nil),
+ f.CodecFactory.scheme,
+ }
+}
+
+// DecoderToVersion returns a decoder that does not do conversion. gv is ignored.
+func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder {
+ return DirectCodec{
+ runtime.NewCodec(nil, serializer),
+ nil,
+ }
+}
+
+// DirectCodec is a codec that does not do conversion. It sets the gvk during serialization, and removes the gvk during deserialization.
+type DirectCodec struct {
+ runtime.Serializer
+ runtime.ObjectTyper
+}
+
+// Encode does not do conversion. It sets the gvk during serialization.
+func (c DirectCodec) Encode(obj runtime.Object, stream io.Writer) error {
+ gvks, _, err := c.ObjectTyper.ObjectKinds(obj)
+ if err != nil {
+ return err
+ }
+ kind := obj.GetObjectKind()
+ oldGVK := kind.GroupVersionKind()
+ kind.SetGroupVersionKind(gvks[0])
+ err = c.Serializer.Encode(obj, stream)
+ kind.SetGroupVersionKind(oldGVK)
+ return err
+}
+
+// Decode does not do conversion. It removes the gvk during deserialization.
+func (c DirectCodec) Decode(data []byte, defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ obj, gvk, err := c.Serializer.Decode(data, defaults, into)
+ if obj != nil {
+ kind := obj.GetObjectKind()
+ // clearing the gvk is just a convention of a codec
+ kind.SetGroupVersionKind(unversioned.GroupVersionKind{})
+ }
+ return obj, gvk, err
+}
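A minimal usage sketch for the factory defined above, assuming runtime.NewScheme() is available in this vendored snapshot and that real callers would first register their API types on the scheme:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer"
)

func main() {
	// Assumed constructor in this snapshot; normally the scheme would already
	// carry the relevant API group registrations.
	scheme := runtime.NewScheme()
	factory := serializer.NewCodecFactory(scheme)

	// JSON and YAML are always registered; protobuf is added via serializerExtensions.
	fmt.Println(factory.SupportedMediaTypes())

	// The legacy ?pretty=1 convention selects the indented JSON variant.
	info, ok := factory.SerializerForMediaType("application/json", map[string]string{"pretty": "1"})
	fmt.Println(info.MediaType, ok)

	// A codec that always encodes JSON for a chosen external version.
	codec := factory.LegacyCodec(unversioned.GroupVersion{Group: "", Version: "v1"})
	_ = codec
}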
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go
new file mode 100644
index 0000000..c26fa50
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go
@@ -0,0 +1,243 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ "encoding/json"
+ "io"
+
+ "github.com/ghodss/yaml"
+ "github.com/ugorji/go/codec"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/framer"
+ utilyaml "k8s.io/kubernetes/pkg/util/yaml"
+)
+
+// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
+// is not nil, the object has the group, version, and kind fields set.
+func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
+ return &Serializer{
+ meta: meta,
+ creater: creater,
+ typer: typer,
+ yaml: false,
+ pretty: pretty,
+ }
+}
+
+// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
+// matches JSON, and will error if constructs are used that do not serialize to JSON.
+func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+ return &Serializer{
+ meta: meta,
+ creater: creater,
+ typer: typer,
+ yaml: true,
+ }
+}
+
+type Serializer struct {
+ meta MetaFactory
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
+ yaml bool
+ pretty bool
+}
+
+// Serializer implements runtime.Serializer for JSON and YAML.
+var _ runtime.Serializer = &Serializer{}
+
+// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
+// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be
+// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using
+// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of
+// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind.
+func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ if versioned, ok := into.(*runtime.VersionedObjects); ok {
+ into = versioned.Last()
+ obj, actual, err := s.Decode(originalData, gvk, into)
+ if err != nil {
+ return nil, actual, err
+ }
+ versioned.Objects = []runtime.Object{obj}
+ return versioned, actual, nil
+ }
+
+ data := originalData
+ if s.yaml {
+ altered, err := yaml.YAMLToJSON(data)
+ if err != nil {
+ return nil, nil, err
+ }
+ data = altered
+ }
+
+ actual, err := s.meta.Interpret(data)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if gvk != nil {
+ // apply kind and version defaulting from provided default
+ if len(actual.Kind) == 0 {
+ actual.Kind = gvk.Kind
+ }
+ if len(actual.Version) == 0 && len(actual.Group) == 0 {
+ actual.Group = gvk.Group
+ actual.Version = gvk.Version
+ }
+ if len(actual.Version) == 0 && actual.Group == gvk.Group {
+ actual.Version = gvk.Version
+ }
+ }
+
+ if unk, ok := into.(*runtime.Unknown); ok && unk != nil {
+ unk.Raw = originalData
+ unk.ContentType = runtime.ContentTypeJSON
+ unk.GetObjectKind().SetGroupVersionKind(*actual)
+ return unk, actual, nil
+ }
+
+ if into != nil {
+ types, _, err := s.typer.ObjectKinds(into)
+ switch {
+ case runtime.IsNotRegisteredError(err):
+ if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil {
+ return nil, actual, err
+ }
+ return into, actual, nil
+ case err != nil:
+ return nil, actual, err
+ default:
+ typed := types[0]
+ if len(actual.Kind) == 0 {
+ actual.Kind = typed.Kind
+ }
+ if len(actual.Version) == 0 && len(actual.Group) == 0 {
+ actual.Group = typed.Group
+ actual.Version = typed.Version
+ }
+ if len(actual.Version) == 0 && actual.Group == typed.Group {
+ actual.Version = typed.Version
+ }
+ }
+ }
+
+ if len(actual.Kind) == 0 {
+ return nil, actual, runtime.NewMissingKindErr(string(originalData))
+ }
+ if len(actual.Version) == 0 {
+ return nil, actual, runtime.NewMissingVersionErr(string(originalData))
+ }
+
+ // use the target if necessary
+ obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+ if err != nil {
+ return nil, actual, err
+ }
+
+ if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil {
+ return nil, actual, err
+ }
+ return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+ if s.yaml {
+ json, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+ data, err := yaml.JSONToYAML(json)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+ }
+
+ if s.pretty {
+ data, err := json.MarshalIndent(obj, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+ }
+ encoder := json.NewEncoder(w)
+ return encoder.Encode(obj)
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
+ if s.yaml {
+ // we could potentially look for '---'
+ return false, true, nil
+ }
+ _, ok = utilyaml.GuessJSONStream(peek, 2048)
+ return ok, false, nil
+}
+
+// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
+var Framer = jsonFramer{}
+
+type jsonFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {
+ // we can write JSON objects directly to the writer, because they are self-framing
+ return w
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+ // we need to extract the JSON chunks of data to pass to Decode()
+ return framer.NewJSONFramedReader(r)
+}
+
+// YAMLFramer is the default YAML framing behavior, with `---` delimiting individual documents.
+var YAMLFramer = yamlFramer{}
+
+type yamlFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer {
+ return yamlFrameWriter{w}
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+ // extract the YAML document chunks directly
+ return utilyaml.NewDocumentDecoder(r)
+}
+
+type yamlFrameWriter struct {
+ w io.Writer
+}
+
+// Write separates each document with the YAML document separator (`---` followed by line
+// break). Callers must write well-formed YAML documents (including a final line break).
+func (w yamlFrameWriter) Write(data []byte) (n int, err error) {
+ if _, err := w.w.Write([]byte("---\n")); err != nil {
+ return 0, err
+ }
+ return w.w.Write(data)
+}
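A short sketch of driving the JSON serializer directly, assuming runtime.NewScheme() exists in this snapshot; decoding into *runtime.Unknown short-circuits before any type lookup, so no registered types are required, as described in the Decode doc comment above:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer/json"
)

func main() {
	// The scheme doubles as ObjectCreater and ObjectTyper; NewScheme is an assumed constructor.
	scheme := runtime.NewScheme()
	s := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)

	data := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"demo"}}`)

	// Decoding into *runtime.Unknown returns the raw bytes plus the interpreted gvk.
	unk := &runtime.Unknown{}
	_, gvk, err := s.Decode(data, nil, unk)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk) // group "", version "v1", kind "Pod"
}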
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go
new file mode 100644
index 0000000..b9bea21
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// MetaFactory is used to store and retrieve the version and kind
+// information for JSON objects in a serializer.
+type MetaFactory interface {
+ // Interpret should return the version and kind of the wire-format of
+ // the object.
+ Interpret(data []byte) (*unversioned.GroupVersionKind, error)
+}
+
+// DefaultMetaFactory is a default factory for versioning objects in JSON. The object
+// in memory and in the default JSON serialization will use the "kind" and "apiVersion"
+// fields.
+var DefaultMetaFactory = SimpleMetaFactory{}
+
+// SimpleMetaFactory provides default methods for retrieving the type and version of objects
+// that are identified by the "apiVersion" and "kind" fields in their JSON
+// serialization. It may be parameterized with the names of the fields in memory, or an
+// optional list of base structs to search for those fields in memory.
+type SimpleMetaFactory struct {
+}
+
+// Interpret will return the APIVersion and Kind of the JSON wire-format
+// encoding of an object, or an error.
+func (SimpleMetaFactory) Interpret(data []byte) (*unversioned.GroupVersionKind, error) {
+ findKind := struct {
+ APIVersion string `json:"apiVersion,omitempty"`
+ Kind string `json:"kind,omitempty"`
+ }{}
+ if err := json.Unmarshal(data, &findKind); err != nil {
+ return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err)
+ }
+ gv, err := unversioned.ParseGroupVersion(findKind.APIVersion)
+ if err != nil {
+ return nil, err
+ }
+ return &unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil
+}
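For illustration, Interpret can be called on raw bytes without any scheme at all; the apiVersion and kind values below are hypothetical:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime/serializer/json"
)

func main() {
	data := []byte(`{"apiVersion":"apps/v1beta1","kind":"Deployment"}`)

	// Interpret only looks at the apiVersion/kind fields; the rest of the payload is ignored.
	gvk, err := json.DefaultMetaFactory.Interpret(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // apps v1beta1 Deployment
}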
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go
new file mode 100644
index 0000000..59b078c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// TODO: We should figure out what happens when someone asks the
+// encoder for a version and it conflicts with the raw serializer.
+type negotiatedSerializerWrapper struct {
+ info runtime.SerializerInfo
+ streamInfo runtime.StreamSerializerInfo
+}
+
+func NegotiatedSerializerWrapper(info runtime.SerializerInfo, streamInfo runtime.StreamSerializerInfo) runtime.NegotiatedSerializer {
+ return &negotiatedSerializerWrapper{info, streamInfo}
+}
+
+func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []string {
+ return []string{}
+}
+
+func (n *negotiatedSerializerWrapper) SerializerForMediaType(mediaType string, options map[string]string) (runtime.SerializerInfo, bool) {
+ return n.info, true
+}
+
+func (n *negotiatedSerializerWrapper) SupportedStreamingMediaTypes() []string {
+ return []string{}
+}
+
+func (n *negotiatedSerializerWrapper) StreamingSerializerForMediaType(mediaType string, options map[string]string) (runtime.StreamSerializerInfo, bool) {
+ return n.streamInfo, true
+}
+
+func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ unversioned.GroupVersion) runtime.Encoder {
+ return e
+}
+
+func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv unversioned.GroupVersion) runtime.Decoder {
+ return d
+}
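A sketch of wrapping a single serializer as a NegotiatedSerializer, assuming runtime.NewScheme() is available; the wrapper answers every media-type query with the same info and performs no version conversion, which suits tests or single-format clients:

package main

import (
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme() // assumed constructor in this snapshot
	jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)

	info := runtime.SerializerInfo{Serializer: jsonSerializer, MediaType: "application/json", EncodesAsText: true}
	streamInfo := runtime.StreamSerializerInfo{SerializerInfo: info, Framer: json.Framer, Embedded: info}

	// Every media type negotiates to the same JSON serializer.
	ns := serializer.NegotiatedSerializerWrapper(info, streamInfo)
	_, _ = ns.SerializerForMediaType("anything/at-all", nil)
}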
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go
new file mode 100644
index 0000000..381748d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package protobuf provides a Kubernetes serializer for the protobuf format.
+package protobuf
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go
new file mode 100644
index 0000000..b9eb9e5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go
@@ -0,0 +1,433 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package protobuf
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/gogo/protobuf/proto"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/framer"
+)
+
+var (
+ // protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All
+ // proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth
+ // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that
+ // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2).
+ //
+ // See k8s.io/kubernetes/pkg/runtime/generated.proto for details of the runtime.Unknown message.
+ //
+ // This encoding scheme is experimental, and is subject to change at any time.
+ protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
+)
+
+type errNotMarshalable struct {
+ t reflect.Type
+}
+
+func (e errNotMarshalable) Error() string {
+ return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
+}
+
+func IsNotMarshalable(err error) bool {
+ _, ok := err.(errNotMarshalable)
+ return err != nil && ok
+}
+
+// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
+// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
+// as-is (any type info passed with the object will be used).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer {
+ return &Serializer{
+ prefix: protoEncodingPrefix,
+ creater: creater,
+ typer: typer,
+ contentType: defaultContentType,
+ }
+}
+
+type Serializer struct {
+ prefix []byte
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
+ contentType string
+}
+
+var _ runtime.Serializer = &Serializer{}
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ if versioned, ok := into.(*runtime.VersionedObjects); ok {
+ into = versioned.Last()
+ obj, actual, err := s.Decode(originalData, gvk, into)
+ if err != nil {
+ return nil, actual, err
+ }
+ // the last item in versioned becomes into, so if versioned was not originally empty we reset the object
+ // array so the first position is the decoded object and the second position is the outermost object.
+ // if there were no objects in the versioned list passed to us, only add ourselves.
+ if into != nil && into != obj {
+ versioned.Objects = []runtime.Object{obj, into}
+ } else {
+ versioned.Objects = []runtime.Object{obj}
+ }
+ return versioned, actual, err
+ }
+
+ prefixLen := len(s.prefix)
+ switch {
+ case len(originalData) == 0:
+ // TODO: treat like decoding {} from JSON with defaulting
+ return nil, nil, fmt.Errorf("empty data")
+ case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]):
+ return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix)
+ case len(originalData) == prefixLen:
+ // TODO: treat like decoding {} from JSON with defaulting
+ return nil, nil, fmt.Errorf("empty body")
+ }
+
+ data := originalData[prefixLen:]
+ unk := runtime.Unknown{}
+ if err := unk.Unmarshal(data); err != nil {
+ return nil, nil, err
+ }
+
+ actual := unk.GroupVersionKind()
+ copyKindDefaults(&actual, gvk)
+
+ if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+ *intoUnknown = unk
+ if len(intoUnknown.ContentType) == 0 {
+ intoUnknown.ContentType = s.contentType
+ }
+ return intoUnknown, &actual, nil
+ }
+
+ if into != nil {
+ types, _, err := s.typer.ObjectKinds(into)
+ switch {
+ case runtime.IsNotRegisteredError(err):
+ pb, ok := into.(proto.Message)
+ if !ok {
+ return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
+ }
+ if err := proto.Unmarshal(unk.Raw, pb); err != nil {
+ return nil, &actual, err
+ }
+ return into, &actual, nil
+ case err != nil:
+ return nil, &actual, err
+ default:
+ copyKindDefaults(&actual, &types[0])
+ // if the result of defaulting did not set a version or group, ensure that at least group is set
+ // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+ // of into is set if there is no better information from the caller or object.
+ if len(actual.Version) == 0 && len(actual.Group) == 0 {
+ actual.Group = types[0].Group
+ }
+ }
+ }
+
+ if len(actual.Kind) == 0 {
+ return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta))
+ }
+ if len(actual.Version) == 0 {
+ return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta))
+ }
+
+ return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+ var unk runtime.Unknown
+ kind := obj.GetObjectKind().GroupVersionKind()
+ unk = runtime.Unknown{
+ TypeMeta: runtime.TypeMeta{
+ Kind: kind.Kind,
+ APIVersion: kind.GroupVersion().String(),
+ },
+ }
+
+ prefixSize := uint64(len(s.prefix))
+
+ switch t := obj.(type) {
+ case bufferedMarshaller:
+ // this path performs a single allocation during write but requires the caller to implement
+ // the more efficient Size and MarshalTo methods
+ encodedSize := uint64(t.Size())
+ estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
+ data := make([]byte, estimatedSize)
+
+ i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
+ if err != nil {
+ return err
+ }
+
+ copy(data, s.prefix)
+
+ _, err = w.Write(data[:prefixSize+uint64(i)])
+ return err
+
+ case proto.Marshaler:
+ // this path performs extra allocations
+ data, err := t.Marshal()
+ if err != nil {
+ return err
+ }
+ unk.Raw = data
+
+ estimatedSize := prefixSize + uint64(unk.Size())
+ data = make([]byte, estimatedSize)
+
+ i, err := unk.MarshalTo(data[prefixSize:])
+ if err != nil {
+ return err
+ }
+
+ copy(data, s.prefix)
+
+ _, err = w.Write(data[:prefixSize+uint64(i)])
+ return err
+
+ default:
+ // TODO: marshal with a different content type and serializer (JSON for third party objects)
+ return errNotMarshalable{reflect.TypeOf(obj)}
+ }
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
+	prefix := make([]byte, 4)
+	n, err := peek.Read(prefix)
+	if err != nil {
+		if err == io.EOF {
+			return false, false, nil
+		}
+		return false, false, err
+	}
+	if n != 4 {
+		return false, false, nil
+	}
+	return bytes.Equal(s.prefix, prefix), false, nil
+}
+
+// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
+func copyKindDefaults(dst, src *unversioned.GroupVersionKind) {
+ if src == nil {
+ return
+ }
+ // apply kind and version defaulting from provided default
+ if len(dst.Kind) == 0 {
+ dst.Kind = src.Kind
+ }
+ if len(dst.Version) == 0 && len(src.Version) > 0 {
+ dst.Group = src.Group
+ dst.Version = src.Version
+ }
+}
+
+// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
+// byte buffers by pre-calculating the size of the final buffer needed.
+type bufferedMarshaller interface {
+ proto.Sizer
+ runtime.ProtobufMarshaller
+}
+
+// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
+// object with a nil Raw field and the expected size of the provided buffer. The
+// returned size will not be correct if Raw is already set on unk.
+func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
+ size := uint64(unk.Size())
+ // protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here),
+ // and the size of the array.
+ size += 1 + 8 + byteSize
+ return size
+}
+
+// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
+// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
+func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer {
+ return &RawSerializer{
+ creater: creater,
+ typer: typer,
+ contentType: defaultContentType,
+ }
+}
+
+// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
+// type).
+type RawSerializer struct {
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
+ contentType string
+}
+
+var _ runtime.Serializer = &RawSerializer{}
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ if into == nil {
+ return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
+ }
+
+ if versioned, ok := into.(*runtime.VersionedObjects); ok {
+ into = versioned.Last()
+ obj, actual, err := s.Decode(originalData, gvk, into)
+ if err != nil {
+ return nil, actual, err
+ }
+ if into != nil && into != obj {
+ versioned.Objects = []runtime.Object{obj, into}
+ } else {
+ versioned.Objects = []runtime.Object{obj}
+ }
+ return versioned, actual, err
+ }
+
+ if len(originalData) == 0 {
+ // TODO: treat like decoding {} from JSON with defaulting
+ return nil, nil, fmt.Errorf("empty data")
+ }
+ data := originalData
+
+ actual := &unversioned.GroupVersionKind{}
+ copyKindDefaults(actual, gvk)
+
+ if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+ intoUnknown.Raw = data
+ intoUnknown.ContentEncoding = ""
+ intoUnknown.ContentType = s.contentType
+ intoUnknown.SetGroupVersionKind(*actual)
+ return intoUnknown, actual, nil
+ }
+
+ types, _, err := s.typer.ObjectKinds(into)
+ switch {
+ case runtime.IsNotRegisteredError(err):
+ pb, ok := into.(proto.Message)
+ if !ok {
+ return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
+ }
+ if err := proto.Unmarshal(data, pb); err != nil {
+ return nil, actual, err
+ }
+ return into, actual, nil
+ case err != nil:
+ return nil, actual, err
+ default:
+ copyKindDefaults(actual, &types[0])
+ // if the result of defaulting did not set a version or group, ensure that at least group is set
+ // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+ // of into is set if there is no better information from the caller or object.
+ if len(actual.Version) == 0 && len(actual.Group) == 0 {
+ actual.Group = types[0].Group
+ }
+ }
+
+ if len(actual.Kind) == 0 {
+ return nil, actual, runtime.NewMissingKindErr("<protobuf encoded body - must provide default type>")
+ }
+ if len(actual.Version) == 0 {
+ return nil, actual, runtime.NewMissingVersionErr("<protobuf encoded body - must provide default type>")
+ }
+
+ return unmarshalToObject(s.typer, s.creater, actual, into, data)
+}
+
+// unmarshalToObject is the common code between decode in the raw and normal serializer.
+func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *unversioned.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ // use the target if necessary
+ obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into)
+ if err != nil {
+ return nil, actual, err
+ }
+
+ pb, ok := obj.(proto.Message)
+ if !ok {
+ return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
+ }
+ if err := proto.Unmarshal(data, pb); err != nil {
+ return nil, actual, err
+ }
+ return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
+ switch t := obj.(type) {
+ case bufferedMarshaller:
+ // this path performs a single allocation during write but requires the caller to implement
+ // the more efficient Size and MarshalTo methods
+ encodedSize := uint64(t.Size())
+ data := make([]byte, encodedSize)
+
+ n, err := t.MarshalTo(data)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data[:n])
+ return err
+
+ case proto.Marshaler:
+ // this path performs extra allocations
+ data, err := t.Marshal()
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+
+ default:
+ return errNotMarshalable{reflect.TypeOf(obj)}
+ }
+}
+
+var LengthDelimitedFramer = lengthDelimitedFramer{}
+
+type lengthDelimitedFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer {
+ return framer.NewLengthDelimitedFrameWriter(w)
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+ return framer.NewLengthDelimitedFrameReader(r)
+}
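The envelope format is recognizable purely from its four-byte magic prefix; the standalone sketch below mirrors the same check Decode performs before unmarshalling the runtime.Unknown envelope (the payload bytes are placeholders):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The first four bytes of every envelope written by this serializer: 'k' '8' 's' 0x00.
	prefix := []byte{0x6b, 0x38, 0x73, 0x00}

	payload := append(append([]byte{}, prefix...), []byte("...runtime.Unknown proto bytes...")...)

	if len(payload) < len(prefix) || !bytes.Equal(prefix, payload[:len(prefix)]) {
		fmt.Println("not a kubernetes protobuf envelope")
		return
	}
	fmt.Println("kubernetes protobuf envelope, body length:", len(payload)-len(prefix))
}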
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go
new file mode 100644
index 0000000..5846d94
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer/protobuf"
+)
+
+const (
+ // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from
+ // depending on it unintentionally.
+ // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the
+ // CodecFactory on initialization.
+ contentTypeProtobuf = "application/vnd.kubernetes.protobuf"
+ contentTypeProtobufWatch = contentTypeProtobuf + ";stream=watch"
+)
+
+func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) {
+ serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf)
+ raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf)
+ return serializerType{
+ AcceptContentTypes: []string{contentTypeProtobuf},
+ ContentType: contentTypeProtobuf,
+ FileExtensions: []string{"pb"},
+ Serializer: serializer,
+ RawSerializer: raw,
+
+ AcceptStreamContentTypes: []string{contentTypeProtobuf, contentTypeProtobufWatch},
+ StreamContentType: contentTypeProtobufWatch,
+ Framer: protobuf.LengthDelimitedFramer,
+ StreamSerializer: raw,
+ }, true
+}
+
+func init() {
+ serializerExtensions = append(serializerExtensions, protobufSerializer)
+}
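Because init() appends protobufSerializer to serializerExtensions, a factory built with NewCodecFactory can negotiate the protobuf media type without further wiring; a small sketch, assuming runtime.NewScheme() exists in this snapshot:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme() // assumed constructor
	factory := serializer.NewCodecFactory(scheme)

	// Registered by the init() above alongside JSON and YAML.
	_, ok := factory.SerializerForMediaType("application/vnd.kubernetes.protobuf", nil)
	fmt.Println("protobuf negotiable:", ok)
}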
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go
new file mode 100644
index 0000000..310002a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package recognizer
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+type RecognizingDecoder interface {
+ runtime.Decoder
+	// RecognizesData should return true if the data in the provided reader
+	// belongs to this decoder, or an error if the data could not be read or is ambiguous.
+	// Unknown is true if the data could not be determined to match the decoder type.
+	// Decoders should assume that they can read as much of peek as they need (as the caller
+	// provides) and may return unknown if the data provided is not sufficient to make
+	// a determination. When peek returns EOF that may mean the end of the input or the
+	// end of buffered input - recognizers should return the best guess at that time.
+ RecognizesData(peek io.Reader) (ok, unknown bool, err error)
+}
+
+// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
+// by:
+//
+// 1. The decoder implements RecognizingDecoder and identifies the data
+// 2. All other decoders, and any decoder that returned true for unknown.
+//
+// The order passed to the constructor is preserved within those priorities.
+func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder {
+ return &decoder{
+ decoders: decoders,
+ }
+}
+
+type decoder struct {
+ decoders []runtime.Decoder
+}
+
+var _ RecognizingDecoder = &decoder{}
+
+func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) {
+ var (
+ lastErr error
+ anyUnknown bool
+ )
+ data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024)
+ for _, r := range d.decoders {
+ switch t := r.(type) {
+ case RecognizingDecoder:
+ ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data))
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ anyUnknown = anyUnknown || unknown
+ if !ok {
+ continue
+ }
+ return true, false, nil
+ }
+ }
+ return false, anyUnknown, lastErr
+}
+
+func (d *decoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ var (
+ lastErr error
+ skipped []runtime.Decoder
+ )
+
+ // try recognizers, record any decoders we need to give a chance later
+ for _, r := range d.decoders {
+ switch t := r.(type) {
+ case RecognizingDecoder:
+ buf := bytes.NewBuffer(data)
+ ok, unknown, err := t.RecognizesData(buf)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ if unknown {
+ skipped = append(skipped, t)
+ continue
+ }
+ if !ok {
+ continue
+ }
+ return r.Decode(data, gvk, into)
+ default:
+ skipped = append(skipped, t)
+ }
+ }
+
+	// try the skipped decoders: those that returned unknown and those that do not implement RecognizingDecoder
+ for _, r := range skipped {
+ out, actual, err := r.Decode(data, gvk, into)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ return out, actual, nil
+ }
+
+ if lastErr == nil {
+ lastErr = fmt.Errorf("no serialization format matched the provided data")
+ }
+ return nil, nil, lastErr
+}
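A sketch combining the JSON and YAML serializers from this package behind one recognizing decoder (runtime.NewScheme() assumed); the JSON serializer recognizes its own input, while the YAML serializer reports unknown and is only retried in the second pass:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer/json"
	"k8s.io/kubernetes/pkg/runtime/serializer/recognizer"
)

func main() {
	scheme := runtime.NewScheme() // assumed constructor
	jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)
	yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme)

	d := recognizer.NewDecoder(jsonSerializer, yamlSerializer)

	// Decoding into *runtime.Unknown avoids needing registered types.
	unk := &runtime.Unknown{}
	_, gvk, err := d.Decode([]byte(`{"apiVersion":"v1","kind":"Status"}`), nil, unk)
	fmt.Println(gvk, err)
}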
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go
new file mode 100644
index 0000000..ac17138
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package streaming implements encoder and decoder for streams
+// of runtime.Objects over io.Writer/Readers.
+package streaming
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Encoder is a runtime.Encoder on a stream.
+type Encoder interface {
+ // Encode will write the provided object to the stream or return an error. It obeys the same
+ // contract as runtime.VersionedEncoder.
+ Encode(obj runtime.Object) error
+}
+
+// Decoder is a runtime.Decoder from a stream.
+type Decoder interface {
+ // Decode will return io.EOF when no more objects are available.
+ Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error)
+ // Close closes the underlying stream.
+ Close() error
+}
+
+// Serializer is a factory for creating encoders and decoders that work over streams.
+type Serializer interface {
+ NewEncoder(w io.Writer) Encoder
+ NewDecoder(r io.ReadCloser) Decoder
+}
+
+type decoder struct {
+ reader io.ReadCloser
+ decoder runtime.Decoder
+ buf []byte
+ maxBytes int
+ resetRead bool
+}
+
+// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
+// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to
+// read an entire object.
+func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
+ return &decoder{
+ reader: r,
+ decoder: d,
+ buf: make([]byte, 1024),
+ maxBytes: 1024 * 1024,
+ }
+}
+
+var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
+
+// Decode reads the next object from the stream and decodes it.
+func (d *decoder) Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ base := 0
+ for {
+ n, err := d.reader.Read(d.buf[base:])
+ if err == io.ErrShortBuffer {
+ if n == 0 {
+ return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
+ }
+ if d.resetRead {
+ continue
+ }
+ // double the buffer size up to maxBytes
+ if len(d.buf) < d.maxBytes {
+ base += n
+ d.buf = append(d.buf, make([]byte, len(d.buf))...)
+ continue
+ }
+ // must read the rest of the frame (until we stop getting ErrShortBuffer)
+ d.resetRead = true
+ base = 0
+ return nil, nil, ErrObjectTooLarge
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ if d.resetRead {
+ // now that we have drained the large read, continue
+ d.resetRead = false
+ continue
+ }
+ base += n
+ break
+ }
+ return d.decoder.Decode(d.buf[:base], defaults, into)
+}
+
+func (d *decoder) Close() error {
+ return d.reader.Close()
+}
+
+type encoder struct {
+ writer io.Writer
+ encoder runtime.Encoder
+ buf *bytes.Buffer
+}
+
+// NewEncoder returns a new streaming encoder.
+func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
+ return &encoder{
+ writer: w,
+ encoder: e,
+ buf: &bytes.Buffer{},
+ }
+}
+
+// Encode writes the provided object to the nested writer.
+func (e *encoder) Encode(obj runtime.Object) error {
+ if err := e.encoder.Encode(obj, e.buf); err != nil {
+ return err
+ }
+ _, err := e.writer.Write(e.buf.Bytes())
+ e.buf.Reset()
+ return err
+}
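A sketch of pairing the streaming encoder and decoder with the JSON framer defined earlier (runtime.NewScheme() assumed; the buffer is left empty, so Decode reports end of stream as documented on the Decoder interface):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer/json"
	"k8s.io/kubernetes/pkg/runtime/serializer/streaming"
)

func main() {
	scheme := runtime.NewScheme() // assumed constructor
	s := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)

	// Encode side: JSON objects are self-framing, so the frame writer is the raw writer.
	var buf bytes.Buffer
	enc := streaming.NewEncoder(json.Framer.NewFrameWriter(&buf), s)
	_ = enc // encoding a real object would require a type registered in the scheme

	// Decode side: frame reader plus streaming decoder; with no frames buffered,
	// Decode returns io.EOF per the Decoder contract.
	dec := streaming.NewDecoder(json.Framer.NewFrameReader(ioutil.NopCloser(&buf)), s)
	defer dec.Close()
	_, _, err := dec.Decode(nil, &runtime.Unknown{})
	fmt.Println(err)
}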
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go
new file mode 100644
index 0000000..6e67964
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioning
+
+import (
+ "fmt"
+ "io"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// EnableCrossGroupDecoding modifies the given decoder in place, if it is a codec
+// from this package. It allows objects from one group to be auto-decoded into
+// another group. 'destGroup' must already exist in the codec.
+// TODO: this is an encapsulation violation and should be refactored
+func EnableCrossGroupDecoding(d runtime.Decoder, sourceGroup, destGroup string) error {
+ internal, ok := d.(*codec)
+ if !ok {
+ return fmt.Errorf("unsupported decoder type")
+ }
+
+ dest, ok := internal.decodeVersion[destGroup]
+ if !ok {
+ return fmt.Errorf("group %q is not a possible destination group in the given codec", destGroup)
+ }
+ internal.decodeVersion[sourceGroup] = dest
+
+ return nil
+}
+
+// EnableCrossGroupEncoding modifies the given encoder in place, if it is a codec
+// from this package. It allows objects from one group to be auto-encoded into
+// another group. 'destGroup' must already exist in the codec.
+// TODO: this is an encapsulation violation and should be refactored
+func EnableCrossGroupEncoding(e runtime.Encoder, sourceGroup, destGroup string) error {
+ internal, ok := e.(*codec)
+ if !ok {
+ return fmt.Errorf("unsupported encoder type")
+ }
+
+ dest, ok := internal.encodeVersion[destGroup]
+ if !ok {
+ return fmt.Errorf("group %q is not a possible destination group in the given codec", destGroup)
+ }
+ internal.encodeVersion[sourceGroup] = dest
+
+ return nil
+}
+
+// NewCodecForScheme is a convenience method for callers that are using a scheme.
+func NewCodecForScheme(
+ // TODO: I should be a scheme interface?
+ scheme *runtime.Scheme,
+ encoder runtime.Encoder,
+ decoder runtime.Decoder,
+ encodeVersion []unversioned.GroupVersion,
+ decodeVersion []unversioned.GroupVersion,
+) runtime.Codec {
+ return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion)
+}
+
+// NewCodec takes objects in their internal versions and converts them to external versions before
+// serializing them. It assumes the serializer provided to it only deals with external versions.
+// This codec is also a serializer, but is generally used with a specific version.
+func NewCodec(
+ encoder runtime.Encoder,
+ decoder runtime.Decoder,
+ convertor runtime.ObjectConvertor,
+ creater runtime.ObjectCreater,
+ copier runtime.ObjectCopier,
+ typer runtime.ObjectTyper,
+ encodeVersion []unversioned.GroupVersion,
+ decodeVersion []unversioned.GroupVersion,
+) runtime.Codec {
+ internal := &codec{
+ encoder: encoder,
+ decoder: decoder,
+ convertor: convertor,
+ creater: creater,
+ copier: copier,
+ typer: typer,
+ }
+ if encodeVersion != nil {
+ internal.encodeVersion = make(map[string]unversioned.GroupVersion)
+ for _, v := range encodeVersion {
+ // first one for a group wins. This is consistent with best to worst order throughout the codebase
+ if _, ok := internal.encodeVersion[v.Group]; ok {
+ continue
+ }
+ internal.encodeVersion[v.Group] = v
+ }
+ if len(internal.encodeVersion) == 1 {
+ for _, v := range internal.encodeVersion {
+ internal.preferredEncodeVersion = []unversioned.GroupVersion{v}
+ }
+ }
+ }
+ if decodeVersion != nil {
+ internal.decodeVersion = make(map[string]unversioned.GroupVersion)
+ for _, v := range decodeVersion {
+ // first one for a group wins. This is consistent with best to worst order throughout the codebase
+ if _, ok := internal.decodeVersion[v.Group]; ok {
+ continue
+ }
+ internal.decodeVersion[v.Group] = v
+ }
+ }
+
+ return internal
+}
+
+type codec struct {
+ encoder runtime.Encoder
+ decoder runtime.Decoder
+ convertor runtime.ObjectConvertor
+ creater runtime.ObjectCreater
+ copier runtime.ObjectCopier
+ typer runtime.ObjectTyper
+
+ encodeVersion map[string]unversioned.GroupVersion
+ decodeVersion map[string]unversioned.GroupVersion
+
+ preferredEncodeVersion []unversioned.GroupVersion
+}
+
+// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
+// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
+// into that matches the serialized version.
+func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+ versioned, isVersioned := into.(*runtime.VersionedObjects)
+ if isVersioned {
+ into = versioned.Last()
+ }
+
+ obj, gvk, err := c.decoder.Decode(data, defaultGVK, into)
+ if err != nil {
+ return nil, gvk, err
+ }
+
+ // if we specify a target, use generic conversion.
+ if into != nil {
+ if into == obj {
+ if isVersioned {
+ return versioned, gvk, nil
+ }
+ return into, gvk, nil
+ }
+ if err := c.convertor.Convert(obj, into); err != nil {
+ return nil, gvk, err
+ }
+ if isVersioned {
+ versioned.Objects = []runtime.Object{obj, into}
+ return versioned, gvk, nil
+ }
+ return into, gvk, nil
+ }
+
+ // invoke a version conversion
+ group := gvk.Group
+ if defaultGVK != nil {
+ group = defaultGVK.Group
+ }
+ var targetGV unversioned.GroupVersion
+ if c.decodeVersion == nil {
+ // convert to internal by default
+ targetGV.Group = group
+ targetGV.Version = runtime.APIVersionInternal
+ } else {
+ gv, ok := c.decodeVersion[group]
+ if !ok {
+ // unknown objects are left in their original version
+ if isVersioned {
+ versioned.Objects = []runtime.Object{obj}
+ return versioned, gvk, nil
+ }
+ return obj, gvk, nil
+ }
+ targetGV = gv
+ }
+
+ if gvk.GroupVersion() == targetGV {
+ if isVersioned {
+ versioned.Objects = []runtime.Object{obj}
+ return versioned, gvk, nil
+ }
+ return obj, gvk, nil
+ }
+
+ if isVersioned {
+ // create a copy, because ConvertToVersion does not guarantee non-mutation of objects
+ copied, err := c.copier.Copy(obj)
+ if err != nil {
+ copied = obj
+ }
+ versioned.Objects = []runtime.Object{copied}
+ }
+
+ // Convert if needed.
+ out, err := c.convertor.ConvertToVersion(obj, targetGV)
+ if err != nil {
+ return nil, gvk, err
+ }
+ if isVersioned {
+ versioned.Objects = append(versioned.Objects, out)
+ return versioned, gvk, nil
+ }
+ return out, gvk, nil
+}
+
+// Encode ensures the provided object is output in the appropriate group and version, invoking
+// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
+func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
+ if _, ok := obj.(*runtime.Unknown); ok {
+ return c.encoder.Encode(obj, w)
+ }
+ gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
+ if err != nil {
+ return err
+ }
+ gvk := gvks[0]
+
+ if c.encodeVersion == nil || isUnversioned {
+ objectKind := obj.GetObjectKind()
+ old := objectKind.GroupVersionKind()
+ objectKind.SetGroupVersionKind(gvk)
+ err = c.encoder.Encode(obj, w)
+ objectKind.SetGroupVersionKind(old)
+ return err
+ }
+
+ targetGV, ok := c.encodeVersion[gvk.Group]
+
+ // attempt a conversion to the sole encode version
+ if !ok && c.preferredEncodeVersion != nil {
+ ok = true
+ targetGV = c.preferredEncodeVersion[0]
+ }
+
+ // if no fallback is available, error
+ if !ok {
+ return fmt.Errorf("the codec does not recognize group %q for kind %q and cannot encode it", gvk.Group, gvk.Kind)
+ }
+
+ // Perform a conversion if necessary
+ objectKind := obj.GetObjectKind()
+ old := objectKind.GroupVersionKind()
+ out, err := c.convertor.ConvertToVersion(obj, targetGV)
+ if err != nil {
+ if ok {
+ return err
+ }
+ } else {
+ obj = out
+ }
+ // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
+ err = c.encoder.Encode(obj, w)
+ // restore the old GVK, in case conversion returned the same object
+ objectKind.SetGroupVersionKind(old)
+ return err
+}
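For orientation, a minimal sketch (not part of the vendored files) of how the versioning codec above is typically wired up. It assumes the caller already has a *runtime.Scheme plus an external-version Encoder/Decoder pair, that unversioned.GroupVersion has the usual Group/Version string fields, and that "v1" is just a placeholder target.

package example // illustration only, not vendored code

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer/versioning"
)

// newLegacyCodec builds a codec that always encodes to the (placeholder)
// "v1" external version and, because decodeVersion is nil, converts every
// decoded object to the internal version of its group.
func newLegacyCodec(scheme *runtime.Scheme, enc runtime.Encoder, dec runtime.Decoder) runtime.Codec {
	v1 := unversioned.GroupVersion{Group: "", Version: "v1"}
	return versioning.NewCodecForScheme(
		scheme,
		enc, dec,
		[]unversioned.GroupVersion{v1}, // encode targets; the first entry per group wins
		nil,                            // nil decodeVersion => decode to the internal version
	)
}
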
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go
new file mode 100644
index 0000000..29722d5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Pair of strings. We keep the name of a field and its doc.
+type Pair struct {
+ Name, Doc string
+}
+
+// KubeTypes is an array to represent all available types in a parsed file. [0] is for the type itself
+type KubeTypes []Pair
+
+func astFrom(filePath string) *doc.Package {
+ fset := token.NewFileSet()
+ m := make(map[string]*ast.File)
+
+ f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
+ if err != nil {
+ fmt.Println(err)
+ return nil
+ }
+
+ m[filePath] = f
+ apkg, _ := ast.NewPackage(fset, m, nil, nil)
+
+ return doc.New(apkg, "", 0)
+}
+
+func fmtRawDoc(rawDoc string) string {
+ var buffer bytes.Buffer
+ delPrevChar := func() {
+ if buffer.Len() > 0 {
+ buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n"
+ }
+ }
+
+ // Ignore all lines after ---
+ rawDoc = strings.Split(rawDoc, "---")[0]
+
+ for _, line := range strings.Split(rawDoc, "\n") {
+ line = strings.TrimRight(line, " ")
+ leading := strings.TrimLeft(line, " ")
+ switch {
+ case len(line) == 0: // Keep paragraphs
+ delPrevChar()
+ buffer.WriteString("\n\n")
+ case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs
+ case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl
+ default:
+ if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+ delPrevChar()
+ line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-something..."
+ } else {
+ line += " "
+ }
+ buffer.WriteString(line)
+ }
+ }
+
+ postDoc := strings.TrimRight(buffer.String(), "\n")
+ postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
+ postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
+ postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
+ postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
+
+ return postDoc
+}
+
+// fieldName returns the name of the field as it should appear in JSON format
+// "-" indicates that this field is not part of the JSON representation
+func fieldName(field *ast.Field) string {
+ jsonTag := ""
+ if field.Tag != nil {
+ jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation
+ if strings.Contains(jsonTag, "inline") {
+ return "-"
+ }
+ }
+
+ jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-"
+ if jsonTag == "" {
+ if field.Names != nil {
+ return field.Names[0].Name
+ }
+ return field.Type.(*ast.Ident).Name
+ }
+ return jsonTag
+}
+
+// A buffer of lines that will be written.
+type bufferedLine struct {
+ line string
+ indentation int
+}
+
+type buffer struct {
+ lines []bufferedLine
+}
+
+func newBuffer() *buffer {
+ return &buffer{
+ lines: make([]bufferedLine, 0),
+ }
+}
+
+func (b *buffer) addLine(line string, indent int) {
+ b.lines = append(b.lines, bufferedLine{line, indent})
+}
+
+func (b *buffer) flushLines(w io.Writer) error {
+ for _, line := range b.lines {
+ indentation := strings.Repeat("\t", line.indentation)
+ fullLine := fmt.Sprintf("%s%s", indentation, line.line)
+ if _, err := io.WriteString(w, fullLine); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeFuncHeader(b *buffer, structName string, indent int) {
+ s := fmt.Sprintf("var map_%s = map[string]string {\n", structName)
+ b.addLine(s, indent)
+}
+
+func writeFuncFooter(b *buffer, structName string, indent int) {
+ b.addLine("}\n", indent) // Closes the map definition
+
+ s := fmt.Sprintf("func (%s) SwaggerDoc() map[string]string {\n", structName)
+ b.addLine(s, indent)
+ s = fmt.Sprintf("return map_%s\n", structName)
+ b.addLine(s, indent+1)
+ b.addLine("}\n", indent) // Closes the function definition
+}
+
+func writeMapBody(b *buffer, kubeType []Pair, indent int) {
+ format := "\"%s\": \"%s\",\n"
+ for _, pair := range kubeType {
+ s := fmt.Sprintf(format, pair.Name, pair.Doc)
+ b.addLine(s, indent+2)
+ }
+}
+
+// ParseDocumentationFrom gets all types' documentation and returns them as an
+// array. Each type is again represented as an array (we have to use arrays to
+// preserve the order of the fields). This function returns fields and struct
+// definitions that have no documentation as {name, ""}.
+func ParseDocumentationFrom(src string) []KubeTypes {
+ var docForTypes []KubeTypes
+
+ pkg := astFrom(src)
+
+ for _, kubType := range pkg.Types {
+ if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok {
+ var ks KubeTypes
+ ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc)})
+
+ for _, field := range structType.Fields.List {
+ if n := fieldName(field); n != "-" {
+ fieldDoc := fmtRawDoc(field.Doc.Text())
+ ks = append(ks, Pair{n, fieldDoc})
+ }
+ }
+ docForTypes = append(docForTypes, ks)
+ }
+ }
+
+ return docForTypes
+}
+
+// WriteSwaggerDocFunc writes a declaration of a function as a string. This function is used in
+// Swagger as a documentation source for structs and their fields.
+func WriteSwaggerDocFunc(kubeTypes []KubeTypes, w io.Writer) error {
+ for _, kubeType := range kubeTypes {
+ structName := kubeType[0].Name
+ kubeType[0].Name = ""
+
+ // Ignore empty documentation
+ docfulTypes := make(KubeTypes, 0, len(kubeType))
+ for _, pair := range kubeType {
+ if pair.Doc != "" {
+ docfulTypes = append(docfulTypes, pair)
+ }
+ }
+
+ if len(docfulTypes) == 0 {
+ continue // If neither the struct nor any of its fields has documentation, skip the function definition
+ }
+
+ indent := 0
+ buffer := newBuffer()
+
+ writeFuncHeader(buffer, structName, indent)
+ writeMapBody(buffer, docfulTypes, indent)
+ writeFuncFooter(buffer, structName, indent)
+ buffer.addLine("\n", 0)
+
+ if err := buffer.flushLines(w); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// VerifySwaggerDocsExist writes to an io.Writer a list of structs and fields that
+// are missing documentation.
+func VerifySwaggerDocsExist(kubeTypes []KubeTypes, w io.Writer) (int, error) {
+ missingDocs := 0
+ buffer := newBuffer()
+
+ for _, kubeType := range kubeTypes {
+ structName := kubeType[0].Name
+ if kubeType[0].Doc == "" {
+ format := "Missing documentation for the struct itself: %s\n"
+ s := fmt.Sprintf(format, structName)
+ buffer.addLine(s, 0)
+ missingDocs++
+ }
+ kubeType = kubeType[1:] // Skip struct definition
+
+ for _, pair := range kubeType { // Iterate only the fields
+ if pair.Doc == "" {
+ format := "In struct: %s, field documentation is missing: %s\n"
+ s := fmt.Sprintf(format, structName, pair.Name)
+ buffer.addLine(s, 0)
+ missingDocs++
+ }
+ }
+ }
+
+ if err := buffer.flushLines(w); err != nil {
+ return -1, err
+ }
+ return missingDocs, nil
+}
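A short sketch (not part of the vendored files) of how these doc helpers are usually driven end to end; the source path is a placeholder and error handling is reduced to log calls.

package main // illustration only, not vendored code

import (
	"log"
	"os"

	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	src := "types.go" // placeholder: a Go file containing API struct definitions

	// One KubeTypes slice per struct; index 0 is the struct itself,
	// the remaining entries are its JSON-visible fields.
	kubeTypes := runtime.ParseDocumentationFrom(src)

	// Report anything that lacks a doc comment.
	missing, err := runtime.VerifySwaggerDocsExist(kubeTypes, os.Stderr)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d items missing documentation", missing)

	// Emit the generated map_* variables and SwaggerDoc() methods.
	if err := runtime.WriteSwaggerDocFunc(kubeTypes, os.Stdout); err != nil {
		log.Fatal(err)
	}
}
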
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types.go
new file mode 100644
index 0000000..5033c0d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types.go
@@ -0,0 +1,514 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/api/meta/metatypes"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/types"
+)
+
+// Note that the types provided in this file are not versioned and are intended to be
+// safe to use from within all versions of every API object.
+
+// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
+// like this:
+// type MyAwesomeAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// ... // other fields
+// }
+// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
+//
+// TypeMeta is provided here for convenience. You may use it directly from this package or define
+// your own with the same fields.
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+type TypeMeta struct {
+ APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+ Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
+}
+
+const (
+ ContentTypeJSON string = "application/json"
+)
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+// type MyAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// MyPlugin runtime.Object `json:"myPlugin"`
+// }
+// type PluginA struct {
+// AOption string `json:"aOption"`
+// }
+//
+// // External package:
+// type MyAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+// type PluginA struct {
+// AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+// {
+// "kind":"MyAPIObject",
+// "apiVersion":"v1",
+// "myPlugin": {
+// "kind":"PluginA",
+// "aOption":"foo",
+// },
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+type RawExtension struct {
+ // Raw is the underlying serialization of this object.
+ //
+ // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+ Raw []byte `protobuf:"bytes,1,opt,name=raw"`
+ // Object can hold a representation of this extension - useful for working with versioned
+ // structs.
+ Object Object `json:"-"`
+}
+
+// Unknown allows api objects with unknown types to be passed-through. This can be used
+// to deal with the API objects from a plug-in. Unknown objects still have functioning
+// TypeMeta features-- kind, version, etc.
+// TODO: Make this object have easy access to field based accessors and setters for
+// metadata and field mutation.
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+type Unknown struct {
+ TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"`
+ // Raw will hold the complete serialized object which couldn't be matched
+ // with a registered type. Most likely, nothing should be done with this
+ // except for passing it through the system.
+ Raw []byte `protobuf:"bytes,2,opt,name=raw"`
+ // ContentEncoding is the encoding used to encode 'Raw' data.
+ // Unspecified means no encoding.
+ ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"`
+ // ContentType is the serialization method used to serialize 'Raw'.
+ // Unspecified means ContentTypeJSON.
+ ContentType string `protobuf:"bytes,4,opt,name=contentType"`
+}
+
+// Unstructured allows objects that do not have Golang structs registered to be manipulated
+// generically. This can be used to deal with the API objects from a plug-in. Unstructured
+// objects still have functioning TypeMeta features-- kind, version, etc.
+// TODO: Make this object have easy access to field based accessors and setters for
+// metadata and field mutation.
+type Unstructured struct {
+ // Object is a JSON compatible map with string, float, int, []interface{}, or map[string]interface{}
+ // children.
+ Object map[string]interface{}
+}
+
+func getNestedField(obj map[string]interface{}, fields ...string) interface{} {
+ var val interface{} = obj
+ for _, field := range fields {
+ if _, ok := val.(map[string]interface{}); !ok {
+ return nil
+ }
+ val = val.(map[string]interface{})[field]
+ }
+ return val
+}
+
+func getNestedString(obj map[string]interface{}, fields ...string) string {
+ if str, ok := getNestedField(obj, fields...).(string); ok {
+ return str
+ }
+ return ""
+}
+
+func getNestedSlice(obj map[string]interface{}, fields ...string) []string {
+ if m, ok := getNestedField(obj, fields...).([]interface{}); ok {
+ strSlice := make([]string, 0, len(m))
+ for _, v := range m {
+ if str, ok := v.(string); ok {
+ strSlice = append(strSlice, str)
+ }
+ }
+ return strSlice
+ }
+ return nil
+}
+
+func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string {
+ if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok {
+ strMap := make(map[string]string, len(m))
+ for k, v := range m {
+ if str, ok := v.(string); ok {
+ strMap[k] = str
+ }
+ }
+ return strMap
+ }
+ return nil
+}
+
+func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) {
+ m := obj
+ if len(fields) > 1 {
+ for _, field := range fields[0 : len(fields)-1] {
+ if _, ok := m[field].(map[string]interface{}); !ok {
+ m[field] = make(map[string]interface{})
+ }
+ m = m[field].(map[string]interface{})
+ }
+ }
+ m[fields[len(fields)-1]] = value
+}
+
+func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) {
+ m := make([]interface{}, 0, len(value))
+ for _, v := range value {
+ m = append(m, v)
+ }
+ setNestedField(obj, m, fields...)
+}
+
+func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) {
+ m := make(map[string]interface{}, len(value))
+ for k, v := range value {
+ m[k] = v
+ }
+ setNestedField(obj, m, fields...)
+}
+
+func (u *Unstructured) setNestedField(value interface{}, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ setNestedField(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedSlice(value []string, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ setNestedSlice(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ setNestedMap(u.Object, value, fields...)
+}
+
+func extractOwnerReference(src interface{}) metatypes.OwnerReference {
+ v := src.(map[string]interface{})
+ controllerPtr, ok := (getNestedField(v, "controller")).(*bool)
+ if !ok {
+ controllerPtr = nil
+ } else {
+ if controllerPtr != nil {
+ controller := *controllerPtr
+ controllerPtr = &controller
+ }
+ }
+ return metatypes.OwnerReference{
+ Kind: getNestedString(v, "kind"),
+ Name: getNestedString(v, "name"),
+ APIVersion: getNestedString(v, "apiVersion"),
+ UID: (types.UID)(getNestedString(v, "uid")),
+ Controller: controllerPtr,
+ }
+}
+
+func setOwnerReference(src metatypes.OwnerReference) map[string]interface{} {
+ ret := make(map[string]interface{})
+ controllerPtr := src.Controller
+ if controllerPtr != nil {
+ controller := *controllerPtr
+ controllerPtr = &controller
+ }
+ setNestedField(ret, src.Kind, "kind")
+ setNestedField(ret, src.Name, "name")
+ setNestedField(ret, src.APIVersion, "apiVersion")
+ setNestedField(ret, string(src.UID), "uid")
+ setNestedField(ret, controllerPtr, "controller")
+ return ret
+}
+
+func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) {
+ field := getNestedField(object, "metadata", "ownerReferences")
+ if field == nil {
+ return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object)
+ }
+ ownerReferences, ok := field.([]map[string]interface{})
+ if ok {
+ return ownerReferences, nil
+ }
+ // TODO: This is hacky...
+ interfaces, ok := field.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object)
+ }
+ ownerReferences = make([]map[string]interface{}, 0, len(interfaces))
+ for i := 0; i < len(interfaces); i++ {
+ r, ok := interfaces[i].(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object)
+ }
+ ownerReferences = append(ownerReferences, r)
+ }
+ return ownerReferences, nil
+}
+
+func (u *Unstructured) GetOwnerReferences() []metatypes.OwnerReference {
+ original, err := getOwnerReferences(u.Object)
+ if err != nil {
+ glog.V(6).Info(err)
+ return nil
+ }
+ ret := make([]metatypes.OwnerReference, 0, len(original))
+ for i := 0; i < len(original); i++ {
+ ret = append(ret, extractOwnerReference(original[i]))
+ }
+ return ret
+}
+
+func (u *Unstructured) SetOwnerReferences(references []metatypes.OwnerReference) {
+ var newReferences = make([]map[string]interface{}, 0, len(references))
+ for i := 0; i < len(references); i++ {
+ newReferences = append(newReferences, setOwnerReference(references[i]))
+ }
+ u.setNestedField(newReferences, "metadata", "ownerReferences")
+}
+
+func (u *Unstructured) GetAPIVersion() string {
+ return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *Unstructured) SetAPIVersion(version string) {
+ u.setNestedField(version, "apiVersion")
+}
+
+func (u *Unstructured) GetKind() string {
+ return getNestedString(u.Object, "kind")
+}
+
+func (u *Unstructured) SetKind(kind string) {
+ u.setNestedField(kind, "kind")
+}
+
+func (u *Unstructured) GetNamespace() string {
+ return getNestedString(u.Object, "metadata", "namespace")
+}
+
+func (u *Unstructured) SetNamespace(namespace string) {
+ u.setNestedField(namespace, "metadata", "namespace")
+}
+
+func (u *Unstructured) GetName() string {
+ return getNestedString(u.Object, "metadata", "name")
+}
+
+func (u *Unstructured) SetName(name string) {
+ u.setNestedField(name, "metadata", "name")
+}
+
+func (u *Unstructured) GetGenerateName() string {
+ return getNestedString(u.Object, "metadata", "generateName")
+}
+
+func (u *Unstructured) SetGenerateName(name string) {
+ u.setNestedField(name, "metadata", "generateName")
+}
+
+func (u *Unstructured) GetUID() types.UID {
+ return types.UID(getNestedString(u.Object, "metadata", "uid"))
+}
+
+func (u *Unstructured) SetUID(uid types.UID) {
+ u.setNestedField(string(uid), "metadata", "uid")
+}
+
+func (u *Unstructured) GetResourceVersion() string {
+ return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) SetResourceVersion(version string) {
+ u.setNestedField(version, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) GetSelfLink() string {
+ return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *Unstructured) SetSelfLink(selfLink string) {
+ u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *Unstructured) GetCreationTimestamp() unversioned.Time {
+ var timestamp unversioned.Time
+ timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
+ return timestamp
+}
+
+func (u *Unstructured) SetCreationTimestamp(timestamp unversioned.Time) {
+ ts, _ := timestamp.MarshalQueryParameter()
+ u.setNestedField(ts, "metadata", "creationTimestamp")
+}
+
+func (u *Unstructured) GetDeletionTimestamp() *unversioned.Time {
+ var timestamp unversioned.Time
+ timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp"))
+ if timestamp.IsZero() {
+ return nil
+ }
+ return &timestamp
+}
+
+func (u *Unstructured) SetDeletionTimestamp(timestamp *unversioned.Time) {
+ ts, _ := timestamp.MarshalQueryParameter()
+ u.setNestedField(ts, "metadata", "deletionTimestamp")
+}
+
+func (u *Unstructured) GetLabels() map[string]string {
+ return getNestedMap(u.Object, "metadata", "labels")
+}
+
+func (u *Unstructured) SetLabels(labels map[string]string) {
+ u.setNestedMap(labels, "metadata", "labels")
+}
+
+func (u *Unstructured) GetAnnotations() map[string]string {
+ return getNestedMap(u.Object, "metadata", "annotations")
+}
+
+func (u *Unstructured) SetAnnotations(annotations map[string]string) {
+ u.setNestedMap(annotations, "metadata", "annotations")
+}
+
+func (u *Unstructured) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+ u.SetAPIVersion(gvk.GroupVersion().String())
+ u.SetKind(gvk.Kind)
+}
+
+func (u *Unstructured) GroupVersionKind() unversioned.GroupVersionKind {
+ gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion())
+ if err != nil {
+ return unversioned.GroupVersionKind{}
+ }
+ gvk := gv.WithKind(u.GetKind())
+ return gvk
+}
+
+func (u *Unstructured) GetFinalizers() []string {
+ return getNestedSlice(u.Object, "metadata", "finalizers")
+}
+
+func (u *Unstructured) SetFinalizers(finalizers []string) {
+ u.setNestedSlice(finalizers, "metadata", "finalizers")
+}
+
+// UnstructuredList allows lists that do not have Golang structs
+// registered to be manipulated generically. This can be used to deal
+// with the API lists from a plug-in.
+type UnstructuredList struct {
+ Object map[string]interface{}
+
+ // Items is a list of unstructured objects.
+ Items []*Unstructured `json:"items"`
+}
+
+func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) {
+ if u.Object == nil {
+ u.Object = make(map[string]interface{})
+ }
+ setNestedField(u.Object, value, fields...)
+}
+
+func (u *UnstructuredList) GetAPIVersion() string {
+ return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *UnstructuredList) SetAPIVersion(version string) {
+ u.setNestedField(version, "apiVersion")
+}
+
+func (u *UnstructuredList) GetKind() string {
+ return getNestedString(u.Object, "kind")
+}
+
+func (u *UnstructuredList) SetKind(kind string) {
+ u.setNestedField(kind, "kind")
+}
+
+func (u *UnstructuredList) GetResourceVersion() string {
+ return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) SetResourceVersion(version string) {
+ u.setNestedField(version, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) GetSelfLink() string {
+ return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) SetSelfLink(selfLink string) {
+ u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+ u.SetAPIVersion(gvk.GroupVersion().String())
+ u.SetKind(gvk.Kind)
+}
+
+func (u *UnstructuredList) GroupVersionKind() unversioned.GroupVersionKind {
+ gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion())
+ if err != nil {
+ return unversioned.GroupVersionKind{}
+ }
+ gvk := gv.WithKind(u.GetKind())
+ return gvk
+}
+
+// VersionedObjects is used by Decoders to give callers a way to access all versions
+// of an object during the decoding process.
+type VersionedObjects struct {
+ // Objects is the set of objects retrieved during decoding, in order of conversion.
+ // The 0 index is the object as serialized on the wire. If conversion has occurred,
+ // other objects may be present. The right most object is the same as would be returned
+ // by a normal Decode call.
+ Objects []Object
+}
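To make the accessor pattern above concrete, a minimal sketch (not part of the vendored files) of reading and writing Unstructured metadata; all values are placeholders.

package main // illustration only, not vendored code

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	u := &runtime.Unstructured{}

	// The setters lazily allocate u.Object and write nested keys such as
	// metadata.name into the underlying map.
	u.SetAPIVersion("v1")
	u.SetKind("Pod")
	u.SetNamespace("default")
	u.SetName("example-pod")
	u.SetLabels(map[string]string{"app": "demo"})

	// GroupVersionKind() re-parses apiVersion/kind from the map.
	fmt.Println(u.GroupVersionKind(), u.GetName(), u.GetLabels())
}
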
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go
new file mode 100644
index 0000000..ead96ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+)
+
+type ProtobufMarshaller interface {
+ MarshalTo(data []byte) (int, error)
+}
+
+// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
+// that will contain an object that implements ProtobufMarshaller.
+func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size()))
+ n1, err := m.TypeMeta.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+
+ if b != nil {
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, size)
+ n2, err := b.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ if uint64(n2) != size {
+ // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto
+ // struct returned would be wrong.
+ return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2)
+ }
+ i += n2
+ }
+
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
+ i += copy(data[i:], m.ContentEncoding)
+
+ data[i] = 0x22
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
+ i += copy(data[i:], m.ContentType)
+ return i, nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go
new file mode 100644
index 0000000..048e6dc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ gojson "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/util/json"
+)
+
+// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
+// type, which can be used for generic access to objects without a predefined scheme.
+// TODO: move into serializer/json.
+var UnstructuredJSONScheme Codec = unstructuredJSONScheme{}
+
+type unstructuredJSONScheme struct{}
+
+func (s unstructuredJSONScheme) Decode(data []byte, _ *unversioned.GroupVersionKind, obj Object) (Object, *unversioned.GroupVersionKind, error) {
+ var err error
+ if obj != nil {
+ err = s.decodeInto(data, obj)
+ } else {
+ obj, err = s.decode(data)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if len(gvk.Kind) == 0 {
+ return nil, &gvk, NewMissingKindErr(string(data))
+ }
+
+ return obj, &gvk, nil
+}
+
+func (unstructuredJSONScheme) Encode(obj Object, w io.Writer) error {
+ switch t := obj.(type) {
+ case *Unstructured:
+ return json.NewEncoder(w).Encode(t.Object)
+ case *UnstructuredList:
+ items := make([]map[string]interface{}, 0, len(t.Items))
+ for _, i := range t.Items {
+ items = append(items, i.Object)
+ }
+ t.Object["items"] = items
+ defer func() { delete(t.Object, "items") }()
+ return json.NewEncoder(w).Encode(t.Object)
+ case *Unknown:
+ // TODO: Unstructured needs to deal with ContentType.
+ _, err := w.Write(t.Raw)
+ return err
+ default:
+ return json.NewEncoder(w).Encode(t)
+ }
+}
+
+func (s unstructuredJSONScheme) decode(data []byte) (Object, error) {
+ type detector struct {
+ Items gojson.RawMessage
+ }
+ var det detector
+ if err := json.Unmarshal(data, &det); err != nil {
+ return nil, err
+ }
+
+ if det.Items != nil {
+ list := &UnstructuredList{}
+ err := s.decodeToList(data, list)
+ return list, err
+ }
+
+ // No Items field, so it wasn't a list.
+ unstruct := &Unstructured{}
+ err := s.decodeToUnstructured(data, unstruct)
+ return unstruct, err
+}
+func (s unstructuredJSONScheme) decodeInto(data []byte, obj Object) error {
+ switch x := obj.(type) {
+ case *Unstructured:
+ return s.decodeToUnstructured(data, x)
+ case *UnstructuredList:
+ return s.decodeToList(data, x)
+ case *VersionedObjects:
+ u := new(Unstructured)
+ err := s.decodeToUnstructured(data, u)
+ if err == nil {
+ x.Objects = []Object{u}
+ }
+ return err
+ default:
+ return json.Unmarshal(data, x)
+ }
+}
+
+func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
+ m := make(map[string]interface{})
+ if err := json.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ unstruct.Object = m
+
+ return nil
+}
+
+func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
+ type decodeList struct {
+ Items []gojson.RawMessage
+ }
+
+ var dList decodeList
+ if err := json.Unmarshal(data, &dList); err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(data, &list.Object); err != nil {
+ return err
+ }
+
+ // For typed lists, e.g., a PodList, the API server doesn't set each item's
+ // APIVersion and Kind, so we set them here.
+ listAPIVersion := list.GetAPIVersion()
+ listKind := list.GetKind()
+ itemKind := strings.TrimSuffix(listKind, "List")
+
+ delete(list.Object, "items")
+ list.Items = nil
+ for _, i := range dList.Items {
+ unstruct := &Unstructured{}
+ if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
+ return err
+ }
+ // This is hacky. Set the item's Kind and APIVersion to those inferred
+ // from the List.
+ if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
+ unstruct.SetKind(itemKind)
+ unstruct.SetAPIVersion(listAPIVersion)
+ }
+ list.Items = append(list.Items, unstruct)
+ }
+ return nil
+}
+
+// UnstructuredObjectConverter is an ObjectConverter for use with
+// Unstructured objects. Since it has no schema or type information,
+// it will only succeed for no-op conversions. This is provided as a
+// sane implementation for APIs that require an object converter.
+type UnstructuredObjectConverter struct{}
+
+func (UnstructuredObjectConverter) Convert(in, out interface{}) error {
+ unstructIn, ok := in.(*Unstructured)
+ if !ok {
+ return fmt.Errorf("input type %T in not valid for unstructured conversion", in)
+ }
+
+ unstructOut, ok := out.(*Unstructured)
+ if !ok {
+ return fmt.Errorf("output type %T in not valid for unstructured conversion", out)
+ }
+
+ // maybe deep copy the map? It is documented in the
+ // ObjectConverter interface that this function is not
+ // guaranteed to not mutate the input. Or maybe set the input
+ // object to nil.
+ unstructOut.Object = unstructIn.Object
+ return nil
+}
+
+func (UnstructuredObjectConverter) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) {
+ if gvk := in.GetObjectKind().GroupVersionKind(); gvk.GroupVersion() != outVersion {
+ return nil, errors.New("unstructured converter cannot convert versions")
+ }
+ return in, nil
+}
+
+func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
+ return "", "", errors.New("unstructured cannot convert field labels")
+}
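As a usage sketch for UnstructuredJSONScheme (not part of the vendored files): decoding arbitrary JSON into an *Unstructured and encoding it back; the JSON document is a placeholder.

package main // illustration only, not vendored code

import (
	"fmt"
	"log"
	"os"

	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	data := []byte(`{"apiVersion":"v1","kind":"Service","metadata":{"name":"example"}}`)

	// With a nil target object the scheme allocates an *Unstructured
	// (or an *UnstructuredList when an "items" field is present).
	obj, gvk, err := runtime.UnstructuredJSONScheme.Decode(data, nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	u := obj.(*runtime.Unstructured)
	fmt.Println(gvk.Kind, u.GetName())

	// Encode writes the map form back out as JSON.
	if err := runtime.UnstructuredJSONScheme.Encode(u, os.Stdout); err != nil {
		log.Fatal(err)
	}
}
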
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/doc.go
new file mode 100644
index 0000000..783cbcd
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package types implements various generic types used throughout kubernetes.
+package types
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go
new file mode 100644
index 0000000..70a9ac3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// NamespacedName comprises a resource name, with a mandatory namespace,
+// rendered as "<namespace>/<name>". Being a type captures intent and
+// helps make sure that UIDs, namespaced names and non-namespaced names
+// do not get conflated in code. For most use cases, namespace and name
+// will already have been format validated at the API entry point, so we
+// don't do that here. Where that's not the case (e.g. in testing),
+// consider using NamespacedNameOrDie() in testing.go in this package.
+
+type NamespacedName struct {
+ Namespace string
+ Name string
+}
+
+// String returns the general purpose string representation
+func (n NamespacedName) String() string {
+ return n.Namespace + "/" + n.Name
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/uid.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/uid.go
new file mode 100644
index 0000000..8693392
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/uid.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// UID is a type that holds unique ID values, including UUIDs. Because we
+// don't ONLY use UUIDs, this is an alias to string. Being a type captures
+// intent and helps make sure that UIDs and names do not get conflated.
+type UID string
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go
new file mode 100644
index 0000000..dc770c1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// int64 is used as a safe bet against wrap-around (UIDs are generally
+// int32) and to support uid_t -1 and -2.
+
+type UnixUserID int64
+type UnixGroupID int64
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/clock.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/clock.go
new file mode 100644
index 0000000..71aca9e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/clock.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "sync"
+ "time"
+)
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+ After(d time.Duration) <-chan time.Time
+ Sleep(d time.Duration)
+ Tick(d time.Duration) <-chan time.Time
+}
+
+var (
+ _ = Clock(RealClock{})
+ _ = Clock(&FakeClock{})
+ _ = Clock(&IntervalClock{})
+)
+
+// RealClock really calls time.Now()
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+ return time.Since(ts)
+}
+
+// Same as time.After(d).
+func (RealClock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+func (RealClock) Tick(d time.Duration) <-chan time.Time {
+ return time.Tick(d)
+}
+
+func (RealClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// FakeClock implements Clock, but returns an arbitrary time.
+type FakeClock struct {
+ lock sync.RWMutex
+ time time.Time
+
+ // waiters are waiting for the fake time to pass their specified time
+ waiters []fakeClockWaiter
+}
+
+type fakeClockWaiter struct {
+ targetTime time.Time
+ stepInterval time.Duration
+ skipIfBlocked bool
+ destChan chan<- time.Time
+}
+
+func NewFakeClock(t time.Time) *FakeClock {
+ return &FakeClock{
+ time: t,
+ }
+}
+
+// Now returns f's time.
+func (f *FakeClock) Now() time.Time {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return f.time
+}
+
+// Since returns time since the time in f.
+func (f *FakeClock) Since(ts time.Time) time.Duration {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return f.time.Sub(ts)
+}
+
+// Fake version of time.After(d).
+func (f *FakeClock) After(d time.Duration) <-chan time.Time {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ stopTime := f.time.Add(d)
+ ch := make(chan time.Time, 1) // Don't block!
+ f.waiters = append(f.waiters, fakeClockWaiter{
+ targetTime: stopTime,
+ destChan: ch,
+ })
+ return ch
+}
+
+func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ tickTime := f.time.Add(d)
+ ch := make(chan time.Time, 1) // hold one tick
+ f.waiters = append(f.waiters, fakeClockWaiter{
+ targetTime: tickTime,
+ stepInterval: d,
+ skipIfBlocked: true,
+ destChan: ch,
+ })
+
+ return ch
+}
+
+// Move clock by Duration, notify anyone that's called After or Tick
+func (f *FakeClock) Step(d time.Duration) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.setTimeLocked(f.time.Add(d))
+}
+
+// Sets the time.
+func (f *FakeClock) SetTime(t time.Time) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.setTimeLocked(t)
+}
+
+// Actually changes the time and checks any waiters. f must be write-locked.
+func (f *FakeClock) setTimeLocked(t time.Time) {
+ f.time = t
+ newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
+ for i := range f.waiters {
+ w := &f.waiters[i]
+ if !w.targetTime.After(t) {
+
+ if w.skipIfBlocked {
+ select {
+ case w.destChan <- t:
+ default:
+ }
+ } else {
+ w.destChan <- t
+ }
+
+ if w.stepInterval > 0 {
+ for !w.targetTime.After(t) {
+ w.targetTime = w.targetTime.Add(w.stepInterval)
+ }
+ newWaiters = append(newWaiters, *w)
+ }
+
+ } else {
+ newWaiters = append(newWaiters, f.waiters[i])
+ }
+ }
+ f.waiters = newWaiters
+}
+
+// Returns true if After has been called on f but not yet satisfied (so you can
+// write race-free tests).
+func (f *FakeClock) HasWaiters() bool {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ return len(f.waiters) > 0
+}
+
+func (f *FakeClock) Sleep(d time.Duration) {
+ f.Step(d)
+}
+
+// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
+type IntervalClock struct {
+ Time time.Time
+ Duration time.Duration
+}
+
+// Now returns i's time.
+func (i *IntervalClock) Now() time.Time {
+ i.Time = i.Time.Add(i.Duration)
+ return i.Time
+}
+
+// Since returns time since the time in i.
+func (i *IntervalClock) Since(ts time.Time) time.Duration {
+ return i.Time.Sub(ts)
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) After(d time.Duration) <-chan time.Time {
+ panic("IntervalClock doesn't implement After")
+}
+
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
+ panic("IntervalClock doesn't implement Tick")
+}
+
+func (*IntervalClock) Sleep(d time.Duration) {
+ panic("IntervalClock doesn't implement Sleep")
+}
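A small test-style sketch (not part of the vendored files) showing the intended FakeClock pattern: inject a Clock into the code under test, then advance it from the test instead of sleeping for real.

package example // illustration only, not vendored code

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func TestTimeoutFires(t *testing.T) {
	c := util.NewFakeClock(time.Now())

	timeout := c.After(30 * time.Second) // nothing fires until the clock is stepped

	if !c.HasWaiters() {
		t.Fatal("expected a pending waiter after calling After")
	}

	c.Step(time.Minute) // advance fake time past the deadline

	select {
	case <-timeout:
		// expected: Step satisfied the waiter
	default:
		t.Fatal("expected the After channel to have fired")
	}
}
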
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go
new file mode 100644
index 0000000..b573c8a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+// ShouldGenSelfSignedCerts returns false if the certificate or key file already exists;
+// otherwise it returns true.
+func ShouldGenSelfSignedCerts(certPath, keyPath string) bool {
+ if canReadFile(certPath) || canReadFile(keyPath) {
+ return false
+ }
+
+ return true
+}
+
+// canReadFile returns true if the file represented by path exists and is
+// readable; otherwise it returns false.
+func canReadFile(path string) bool {
+ f, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+
+ defer f.Close()
+
+ return true
+}
+
+// GenerateSelfSignedCert creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name
+// You may also specify additional subject alt names (either ip or dns names) for the certificate
+// The certificate will be created with file mode 0644. The key will be created with file mode 0600.
+// If the certificate or key files already exist, they will be overwritten.
+// Any parent directories of the certPath or keyPath will be created as needed with file mode 0755.
+func GenerateSelfSignedCert(host, certPath, keyPath string, alternateIPs []net.IP, alternateDNS []string) error {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return err
+ }
+
+ template := x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ Subject: pkix.Name{
+ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
+ },
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour * 24 * 365),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ if ip := net.ParseIP(host); ip != nil {
+ template.IPAddresses = append(template.IPAddresses, ip)
+ } else {
+ template.DNSNames = append(template.DNSNames, host)
+ }
+
+ template.IPAddresses = append(template.IPAddresses, alternateIPs...)
+ template.DNSNames = append(template.DNSNames, alternateDNS...)
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
+ if err != nil {
+ return err
+ }
+
+ // Generate cert
+ certBuffer := bytes.Buffer{}
+ if err := pem.Encode(&certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
+ return err
+ }
+
+ // Generate key
+ keyBuffer := bytes.Buffer{}
+ if err := pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+ return err
+ }
+
+ // Write cert
+ if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(certPath, certBuffer.Bytes(), os.FileMode(0644)); err != nil {
+ return err
+ }
+
+ // Write key
+ if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(keyPath, keyBuffer.Bytes(), os.FileMode(0600)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CertPoolFromFile returns an x509.CertPool containing the certificates in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func CertPoolFromFile(filename string) (*x509.CertPool, error) {
+ certs, err := certificatesFromFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ pool := x509.NewCertPool()
+ for _, cert := range certs {
+ pool.AddCert(cert)
+ }
+ return pool, nil
+}
+
+// certificatesFromFile returns the x509.Certificates contained in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func certificatesFromFile(file string) ([]*x509.Certificate, error) {
+ if len(file) == 0 {
+ return nil, errors.New("error reading certificates from an empty filename")
+ }
+ pemBlock, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ certs, err := CertsFromPEM(pemBlock)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %s", file, err)
+ }
+ return certs, nil
+}
+
+// CertsFromPEM returns the x509.Certificates contained in the given PEM-encoded byte array
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+ ok := false
+ certs := []*x509.Certificate{}
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ // Only use PEM "CERTIFICATE" blocks without extra headers
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return certs, err
+ }
+
+ certs = append(certs, cert)
+ ok = true
+ }
+
+ if !ok {
+ return certs, errors.New("could not read any certificates")
+ }
+ return certs, nil
+}
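For completeness, a sketch (not part of the vendored files) of generating a self-signed serving certificate and loading it back into a cert pool; the paths and names are placeholders.

package main // illustration only, not vendored code

import (
	"log"
	"net"

	"k8s.io/kubernetes/pkg/util/crypto"
)

func main() {
	certPath, keyPath := "/tmp/example.crt", "/tmp/example.key" // placeholder paths

	// Only generate a fresh pair when neither file is readable yet.
	if crypto.ShouldGenSelfSignedCerts(certPath, keyPath) {
		err := crypto.GenerateSelfSignedCert(
			"localhost", certPath, keyPath,
			[]net.IP{net.ParseIP("127.0.0.1")}, // alternate IPs
			[]string{"example.local"},          // alternate DNS names (placeholder)
		)
		if err != nil {
			log.Fatal(err)
		}
	}

	// Load the PEM-encoded certificate back into an x509.CertPool.
	pool, err := crypto.CertPoolFromFile(certPath)
	if err != nil {
		log.Fatal(err)
	}
	_ = pool // hand the pool to a tls.Config, HTTP client, etc.
}
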
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/doc.go
new file mode 100644
index 0000000..1747db5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package util implements various utility functions used in both testing and implementation
+// of Kubernetes. Package util may not depend on any other package in the Kubernetes
+// package tree.
+package util
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go
new file mode 100644
index 0000000..b3b39bc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors implements various utility functions and types around errors.
+package errors
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go
new file mode 100644
index 0000000..0445c14
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Aggregate represents an object that contains multiple errors, but does not
+// necessarily have singular semantic meaning.
+type Aggregate interface {
+ error
+ Errors() []error
+}
+
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+func NewAggregate(errlist []error) Aggregate {
+ if len(errlist) == 0 {
+ return nil
+ }
+ return aggregate(errlist)
+}
+
+// This helper implements the error and Aggregate interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregate []error
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+ if len(agg) == 0 {
+ // This should never happen, really.
+ return ""
+ }
+ if len(agg) == 1 {
+ return agg[0].Error()
+ }
+ result := fmt.Sprintf("[%s", agg[0].Error())
+ for i := 1; i < len(agg); i++ {
+ result += fmt.Sprintf(", %s", agg[i].Error())
+ }
+ result += "]"
+ return result
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error {
+ return []error(agg)
+}
+
+// Matcher is used to match errors. Returns true if the error matches.
+type Matcher func(error) bool
+
+// FilterOut removes all errors that match any of the matchers from the input
+// error. If the input is a singular error, only that error is tested. If the
+// input implements the Aggregate interface, the list of errors will be
+// processed recursively.
+//
+// This can be used, for example, to remove known-OK errors (such as io.EOF or
+// os.ErrNotExist) from a list of errors.
+func FilterOut(err error, fns ...Matcher) error {
+ if err == nil {
+ return nil
+ }
+ if agg, ok := err.(Aggregate); ok {
+ return NewAggregate(filterErrors(agg.Errors(), fns...))
+ }
+ if !matchesError(err, fns...) {
+ return err
+ }
+ return nil
+}
+
+// matchesError returns true if any Matcher returns true
+func matchesError(err error, fns ...Matcher) bool {
+ for _, fn := range fns {
+ if fn(err) {
+ return true
+ }
+ }
+ return false
+}
+
+// filterErrors returns any errors (or nested errors, if the list contains
+// nested Errors) for which all fns return false. If no errors
+// remain, a nil list is returned. The resulting slice will have all
+// nested slices flattened as a side effect.
+func filterErrors(list []error, fns ...Matcher) []error {
+ result := []error{}
+ for _, err := range list {
+ r := FilterOut(err, fns...)
+ if r != nil {
+ result = append(result, r)
+ }
+ }
+ return result
+}
+
+// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary
+// nesting, and flattens them all into a single Aggregate, recursively.
+func Flatten(agg Aggregate) Aggregate {
+ result := []error{}
+ if agg == nil {
+ return nil
+ }
+ for _, err := range agg.Errors() {
+ if a, ok := err.(Aggregate); ok {
+ r := Flatten(a)
+ if r != nil {
+ result = append(result, r.Errors()...)
+ }
+ } else {
+ if err != nil {
+ result = append(result, err)
+ }
+ }
+ }
+ return NewAggregate(result)
+}
+
+// AggregateGoroutines runs the provided functions in parallel, stuffing all
+// non-nil errors into the returned Aggregate.
+// Returns nil if all the functions complete successfully.
+func AggregateGoroutines(funcs ...func() error) Aggregate {
+ errChan := make(chan error, len(funcs))
+ for _, f := range funcs {
+ go func(f func() error) { errChan <- f() }(f)
+ }
+ errs := make([]error, 0)
+ for i := 0; i < cap(errChan); i++ {
+ if err := <-errChan; err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return NewAggregate(errs)
+}
+
+// ErrPreconditionViolated is returned when the precondition is violated
+var ErrPreconditionViolated = errors.New("precondition is violated")
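
A minimal usage sketch for the Aggregate helpers above; the worker functions and error values are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"io"

	utilerrors "k8s.io/kubernetes/pkg/util/errors"
)

func main() {
	// Run a few tasks in parallel and collect every non-nil error into one Aggregate.
	agg := utilerrors.AggregateGoroutines(
		func() error { return nil },
		func() error { return errors.New("disk full") },
		func() error { return io.EOF },
	)

	// Drop known-OK errors (here io.EOF) before reporting.
	if err := utilerrors.FilterOut(agg, func(e error) bool { return e == io.EOF }); err != nil {
		fmt.Println(err) // prints: disk full
	}
}
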
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go
new file mode 100644
index 0000000..59b9976
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+ "sync"
+ "time"
+
+ "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/integer"
+)
+
+type backoffEntry struct {
+ backoff time.Duration
+ lastUpdate time.Time
+}
+
+type Backoff struct {
+ sync.Mutex
+ Clock util.Clock
+ defaultDuration time.Duration
+ maxDuration time.Duration
+ perItemBackoff map[string]*backoffEntry
+}
+
+func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff {
+ return &Backoff{
+ perItemBackoff: map[string]*backoffEntry{},
+ Clock: tc,
+ defaultDuration: initial,
+ maxDuration: max,
+ }
+}
+
+func NewBackOff(initial, max time.Duration) *Backoff {
+ return &Backoff{
+ perItemBackoff: map[string]*backoffEntry{},
+ Clock: util.RealClock{},
+ defaultDuration: initial,
+ maxDuration: max,
+ }
+}
+
+// Get returns the current backoff duration for the given id.
+func (p *Backoff) Get(id string) time.Duration {
+ p.Lock()
+ defer p.Unlock()
+ var delay time.Duration
+ entry, ok := p.perItemBackoff[id]
+ if ok {
+ delay = entry.backoff
+ }
+ return delay
+}
+
+// move backoff to the next mark, capping at maxDuration
+func (p *Backoff) Next(id string, eventTime time.Time) {
+ p.Lock()
+ defer p.Unlock()
+ entry, ok := p.perItemBackoff[id]
+ if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ entry = p.initEntryUnsafe(id)
+ } else {
+ delay := entry.backoff * 2 // exponential
+ entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
+ }
+ entry.lastUpdate = p.Clock.Now()
+}
+
+// Reset forces clearing of all backoff data for a given key.
+func (p *Backoff) Reset(id string) {
+ p.Lock()
+ defer p.Unlock()
+ delete(p.perItemBackoff, id)
+}
+
+// IsInBackOffSince returns true if the elapsed time since eventTime is smaller than the current backoff window.
+func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
+ p.Lock()
+ defer p.Unlock()
+ entry, ok := p.perItemBackoff[id]
+ if !ok {
+ return false
+ }
+ if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ return false
+ }
+ return p.Clock.Now().Sub(eventTime) < entry.backoff
+}
+
+// IsInBackOffSinceUpdate returns true if the time since lastUpdate is less than the current backoff window.
+func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
+ p.Lock()
+ defer p.Unlock()
+ entry, ok := p.perItemBackoff[id]
+ if !ok {
+ return false
+ }
+ if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ return false
+ }
+ return eventTime.Sub(entry.lastUpdate) < entry.backoff
+}
+
+// Garbage collect records that have aged past maxDuration. Backoff users are expected
+// to invoke this periodically.
+func (p *Backoff) GC() {
+ p.Lock()
+ defer p.Unlock()
+ now := p.Clock.Now()
+ for id, entry := range p.perItemBackoff {
+ if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
+ // GC when entry has not been updated for 2*maxDuration
+ delete(p.perItemBackoff, id)
+ }
+ }
+}
+
+func (p *Backoff) DeleteEntry(id string) {
+ p.Lock()
+ defer p.Unlock()
+ delete(p.perItemBackoff, id)
+}
+
+// initEntryUnsafe must be called with the lock on *Backoff held.
+func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
+ entry := &backoffEntry{backoff: p.defaultDuration}
+ p.perItemBackoff[id] = entry
+ return entry
+}
+
+// After 2*maxDuration we restart the backoff factor to the beginning
+func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+ return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
+}
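
A small sketch of the per-item backoff bookkeeping above, using the real clock; the id and loop are illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Start at 1s and cap the per-item delay at 16s.
	b := flowcontrol.NewBackOff(time.Second, 16*time.Second)
	id := "sync-loop"

	for attempt := 0; attempt < 4; attempt++ {
		now := time.Now()
		if b.IsInBackOffSinceUpdate(id, now) {
			// Still inside the backoff window for this id; skip the work.
			fmt.Printf("attempt %d: backing off for %v\n", attempt, b.Get(id))
			continue
		}
		// ... attempt the work here; on failure, widen the window.
		b.Next(id, now)
		fmt.Printf("attempt %d: next delay %v\n", attempt, b.Get(id))
	}
}
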
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go
new file mode 100644
index 0000000..482ba7d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+ "sync"
+
+ "github.com/juju/ratelimit"
+)
+
+type RateLimiter interface {
+ // TryAccept returns true if a token is taken immediately. Otherwise,
+ // it returns false.
+ TryAccept() bool
+ // Accept returns once a token becomes available.
+ Accept()
+ // Stop stops the rate limiter; subsequent calls to TryAccept will return false
+ Stop()
+ // Saturation returns a percentage number which describes how saturated
+ // this rate limiter is.
+ // Usually we use token bucket rate limiter. In that case,
+ // 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use.
+ Saturation() float64
+}
+
+type tokenBucketRateLimiter struct {
+ limiter *ratelimit.Bucket
+}
+
+// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
+// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
+// smoothed qps rate of 'qps'.
+// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
+// The maximum number of tokens in the bucket is capped at 'burst'.
+func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
+ limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst))
+ return &tokenBucketRateLimiter{limiter}
+}
+
+func (t *tokenBucketRateLimiter) TryAccept() bool {
+ return t.limiter.TakeAvailable(1) == 1
+}
+
+func (t *tokenBucketRateLimiter) Saturation() float64 {
+ capacity := t.limiter.Capacity()
+ avail := t.limiter.Available()
+ return float64(capacity-avail) / float64(capacity)
+}
+
+// Accept will block until a token becomes available
+func (t *tokenBucketRateLimiter) Accept() {
+ t.limiter.Wait(1)
+}
+
+func (t *tokenBucketRateLimiter) Stop() {
+}
+
+type fakeAlwaysRateLimiter struct{}
+
+func NewFakeAlwaysRateLimiter() RateLimiter {
+ return &fakeAlwaysRateLimiter{}
+}
+
+func (t *fakeAlwaysRateLimiter) TryAccept() bool {
+ return true
+}
+
+func (t *fakeAlwaysRateLimiter) Saturation() float64 {
+ return 0
+}
+
+func (t *fakeAlwaysRateLimiter) Stop() {}
+
+func (t *fakeAlwaysRateLimiter) Accept() {}
+
+type fakeNeverRateLimiter struct {
+ wg sync.WaitGroup
+}
+
+func NewFakeNeverRateLimiter() RateLimiter {
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ return &fakeNeverRateLimiter{
+ wg: wg,
+ }
+}
+
+func (t *fakeNeverRateLimiter) TryAccept() bool {
+ return false
+}
+
+func (t *fakeNeverRateLimiter) Saturation() float64 {
+ return 1
+}
+
+func (t *fakeNeverRateLimiter) Stop() {
+ t.wg.Done()
+}
+
+func (t *fakeNeverRateLimiter) Accept() {
+ t.wg.Wait()
+}
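
A short sketch of the token-bucket limiter above; the rates are arbitrary.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Allow a sustained 5 requests/second with bursts of up to 10.
	rl := flowcontrol.NewTokenBucketRateLimiter(5.0, 10)

	start := time.Now()
	for i := 0; i < 20; i++ {
		rl.Accept() // blocks until a token is available
	}
	// The first 10 calls drain the initial burst; the rest arrive at ~5/s.
	fmt.Printf("20 accepts took %v, saturation now %.2f\n", time.Since(start), rl.Saturation())
}
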
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go
new file mode 100644
index 0000000..066680f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package framer implements simple frame decoding techniques for an io.ReadCloser
+package framer
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "io"
+)
+
+type lengthDelimitedFrameWriter struct {
+ w io.Writer
+ h [4]byte
+}
+
+func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
+ return &lengthDelimitedFrameWriter{w: w}
+}
+
+// Write writes a single frame to the nested writer, prepending it with the length
+// in bytes of data (as a 4-byte, big-endian uint32).
+func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
+ binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
+ n, err := w.w.Write(w.h[:])
+ if err != nil {
+ return 0, err
+ }
+ if n != len(w.h) {
+ return 0, io.ErrShortWrite
+ }
+ return w.w.Write(data)
+}
+
+type lengthDelimitedFrameReader struct {
+ r io.ReadCloser
+ remaining int
+}
+
+// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
+// frames off of a stream.
+//
+// The protocol is:
+//
+// stream: message ...
+// message: prefix body
+// prefix: 4 byte uint32 in BigEndian order, denotes length of body
+// body: bytes (0..prefix)
+//
+// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
+// will be returned along with the number of bytes read.
+func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
+ return &lengthDelimitedFrameReader{r: r}
+}
+
+// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
+// is returned and subsequent calls will attempt to read the last frame. A frame is complete when
+// err is nil.
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
+ if r.remaining <= 0 {
+ header := [4]byte{}
+ n, err := io.ReadAtLeast(r.r, header[:4], 4)
+ if err != nil {
+ return 0, err
+ }
+ if n != 4 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ frameLength := int(binary.BigEndian.Uint32(header[:]))
+ r.remaining = frameLength
+ }
+
+ expect := r.remaining
+ max := expect
+ if max > len(data) {
+ max = len(data)
+ }
+ n, err := io.ReadAtLeast(r.r, data[:max], int(max))
+ r.remaining -= n
+ if err == io.ErrShortBuffer || r.remaining > 0 {
+ return n, io.ErrShortBuffer
+ }
+ if err != nil {
+ return n, err
+ }
+ if n != expect {
+ return n, io.ErrUnexpectedEOF
+ }
+
+ return n, nil
+}
+
+func (r *lengthDelimitedFrameReader) Close() error {
+ return r.r.Close()
+}
+
+type jsonFrameReader struct {
+ r io.ReadCloser
+ decoder *json.Decoder
+ remaining []byte
+}
+
+// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
+// of a wire.
+//
+// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
+// the read.
+func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
+ return &jsonFrameReader{
+ r: r,
+ decoder: json.NewDecoder(r),
+ }
+}
+
+// Read decodes the next JSON object in the stream, or returns an error. The returned
+// byte slice will be modified the next time Read is invoked and should not be altered.
+func (r *jsonFrameReader) Read(data []byte) (int, error) {
+ // Return whatever remaining data exists from an in progress frame
+ if n := len(r.remaining); n > 0 {
+ if n <= len(data) {
+ data = append(data[0:0], r.remaining...)
+ r.remaining = nil
+ return n, nil
+ }
+
+ n = len(data)
+ data = append(data[0:0], r.remaining[:n]...)
+ r.remaining = r.remaining[n:]
+ return n, io.ErrShortBuffer
+ }
+
+ // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
+ // data written to data, or be larger than data and a different array.
+ n := len(data)
+ m := json.RawMessage(data[:0])
+ if err := r.decoder.Decode(&m); err != nil {
+ return 0, err
+ }
+
+ // If capacity of data is less than length of the message, decoder will allocate a new slice
+ // and set m to it, which means we need to copy the partial result back into data and preserve
+ // the remaining result for subsequent reads.
+ if len(m) > n {
+ data = append(data[0:0], m[:n]...)
+ r.remaining = m[n:]
+ return n, io.ErrShortBuffer
+ }
+ return len(m), nil
+}
+
+func (r *jsonFrameReader) Close() error {
+ return r.r.Close()
+}
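
A round-trip sketch of the length-delimited framer above over an in-memory buffer.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"k8s.io/kubernetes/pkg/util/framer"
)

func main() {
	var buf bytes.Buffer

	// Write two frames, each prefixed with its 4-byte big-endian length.
	w := framer.NewLengthDelimitedFrameWriter(&buf)
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("world")); err != nil {
		panic(err)
	}

	// Read the frames back one at a time until EOF.
	r := framer.NewLengthDelimitedFrameReader(ioutil.NopCloser(&buf))
	frame := make([]byte, 16)
	for {
		n, err := r.Read(frame)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("frame: %q\n", frame[:n])
	}
}
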
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go
new file mode 100644
index 0000000..803f066
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hash
+
+import (
+ "hash"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// DeepHashObject writes specified object to hash using the spew library
+// which follows pointers and prints actual values of the nested objects
+// ensuring the hash does not change when a pointer changes.
+func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
+ hasher.Reset()
+ printer := spew.ConfigState{
+ Indent: " ",
+ SortKeys: true,
+ DisableMethods: true,
+ SpewKeys: true,
+ }
+ printer.Fprintf(hasher, "%#v", objectToWrite)
+}
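
A sketch of DeepHashObject paired with an FNV hasher; the struct and values are made up for illustration.

package main

import (
	"fmt"
	"hash/fnv"

	hashutil "k8s.io/kubernetes/pkg/util/hash"
)

type spec struct {
	Name  string
	Ports []int
}

func main() {
	a := &spec{Name: "web", Ports: []int{80, 443}}
	b := &spec{Name: "web", Ports: []int{80, 443}}

	ha := fnv.New32a()
	hashutil.DeepHashObject(ha, a)
	hb := fnv.New32a()
	hashutil.DeepHashObject(hb, b)

	// Different pointers, identical contents: the hashes agree.
	fmt.Println(ha.Sum32() == hb.Sum32()) // true
}
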
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go
new file mode 100644
index 0000000..4034754
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package homedir
+
+import (
+ "os"
+ "runtime"
+)
+
+// HomeDir returns the home directory for the current user
+func HomeDir() string {
+ if runtime.GOOS == "windows" {
+ if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
+ homeDir := homeDrive + homePath
+ if _, err := os.Stat(homeDir); err == nil {
+ return homeDir
+ }
+ }
+ if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
+ if _, err := os.Stat(userProfile); err == nil {
+ return userProfile
+ }
+ }
+ }
+ return os.Getenv("HOME")
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go
new file mode 100644
index 0000000..c6ea106
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integer
+
+func IntMax(a, b int) int {
+ if b > a {
+ return b
+ }
+ return a
+}
+
+func IntMin(a, b int) int {
+ if b < a {
+ return b
+ }
+ return a
+}
+
+func Int32Max(a, b int32) int32 {
+ if b > a {
+ return b
+ }
+ return a
+}
+
+func Int32Min(a, b int32) int32 {
+ if b < a {
+ return b
+ }
+ return a
+}
+
+func Int64Max(a, b int64) int64 {
+ if b > a {
+ return b
+ }
+ return a
+}
+
+func Int64Min(a, b int64) int64 {
+ if b < a {
+ return b
+ }
+ return a
+}
+
+// RoundToInt32 rounds floats into integer numbers.
+func RoundToInt32(a float64) int32 {
+ if a < 0 {
+ return int32(a - 0.5)
+ }
+ return int32(a + 0.5)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go
new file mode 100644
index 0000000..3c2bf4f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/util/intstr/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package intstr is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/util/intstr/generated.proto
+
+ It has these top-level messages:
+ IntOrString
+*/
+package intstr
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *IntOrString) Reset() { *m = IntOrString{} }
+func (*IntOrString) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*IntOrString)(nil), "k8s.io.kubernetes.pkg.util.intstr.IntOrString")
+}
+func (m *IntOrString) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *IntOrString) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0x8
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Type))
+ data[i] = 0x10
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.IntVal))
+ data[i] = 0x1a
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.StrVal)))
+ i += copy(data[i:], m.StrVal)
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *IntOrString) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Type))
+ n += 1 + sovGenerated(uint64(m.IntVal))
+ l = len(m.StrVal)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *IntOrString) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IntOrString: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.Type |= (Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType)
+ }
+ m.IntVal = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ m.IntVal |= (int32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StrVal = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto
new file mode 100644
index 0000000..dd508e1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto
@@ -0,0 +1,42 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.util.intstr;
+
+// Package-wide variables from generator "generated".
+option go_package = "intstr";
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message IntOrString {
+ optional int64 type = 1;
+
+ optional int32 intVal = 2;
+
+ optional string strVal = 3;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go
new file mode 100644
index 0000000..59e7a06
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package intstr
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/google/gofuzz"
+)
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type IntOrString struct {
+ Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"`
+ IntVal int32 `protobuf:"varint,2,opt,name=intVal"`
+ StrVal string `protobuf:"bytes,3,opt,name=strVal"`
+}
+
+// Type represents the stored type of IntOrString.
+type Type int
+
+const (
+ Int Type = iota // The IntOrString holds an int.
+ String // The IntOrString holds a string.
+)
+
+// FromInt creates an IntOrString object with an int32 value. It is
+// your responsibility not to call this method with a value that does
+// not fit in an int32.
+// TODO: convert to (val int32)
+func FromInt(val int) IntOrString {
+ return IntOrString{Type: Int, IntVal: int32(val)}
+}
+
+// FromString creates an IntOrString object with a string value.
+func FromString(val string) IntOrString {
+ return IntOrString{Type: String, StrVal: val}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
+ if value[0] == '"' {
+ intstr.Type = String
+ return json.Unmarshal(value, &intstr.StrVal)
+ }
+ intstr.Type = Int
+ return json.Unmarshal(value, &intstr.IntVal)
+}
+
+// String returns the string value, or the Itoa of the int value.
+func (intstr *IntOrString) String() string {
+ if intstr.Type == String {
+ return intstr.StrVal
+ }
+ return strconv.Itoa(intstr.IntValue())
+}
+
+// IntValue returns the IntVal if type Int, or if
+// it is a String, will attempt a conversion to int.
+func (intstr *IntOrString) IntValue() int {
+ if intstr.Type == String {
+ i, _ := strconv.Atoi(intstr.StrVal)
+ return i
+ }
+ return int(intstr.IntVal)
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (intstr IntOrString) MarshalJSON() ([]byte, error) {
+ switch intstr.Type {
+ case Int:
+ return json.Marshal(intstr.IntVal)
+ case String:
+ return json.Marshal(intstr.StrVal)
+ default:
+ return []byte{}, fmt.Errorf("impossible IntOrString.Type")
+ }
+}
+
+func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+ if intstr == nil {
+ return
+ }
+ if c.RandBool() {
+ intstr.Type = Int
+ c.Fuzz(&intstr.IntVal)
+ intstr.StrVal = ""
+ } else {
+ intstr.Type = String
+ intstr.IntVal = 0
+ c.Fuzz(&intstr.StrVal)
+ }
+}
+
+func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
+ value, isPercent, err := getIntOrPercentValue(intOrPercent)
+ if err != nil {
+ return 0, fmt.Errorf("invalid value for IntOrString: %v", err)
+ }
+ if isPercent {
+ if roundUp {
+ value = int(math.Ceil(float64(value) * (float64(total)) / 100))
+ } else {
+ value = int(math.Floor(float64(value) * (float64(total)) / 100))
+ }
+ }
+ return value, nil
+}
+
+func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) {
+ switch intOrStr.Type {
+ case Int:
+ return intOrStr.IntValue(), false, nil
+ case String:
+ s := strings.Replace(intOrStr.StrVal, "%", "", -1)
+ v, err := strconv.Atoi(s)
+ if err != nil {
+ return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err)
+ }
+ return int(v), true, nil
+ }
+ return 0, false, fmt.Errorf("invalid type: neither int nor percentage")
+}
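
A sketch showing why IntOrString exists: one JSON field that accepts either a count or a percentage. The surrounding struct is invented.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/util/intstr"
)

type rollingUpdate struct {
	MaxUnavailable intstr.IntOrString `json:"maxUnavailable"`
}

func main() {
	var ru rollingUpdate
	if err := json.Unmarshal([]byte(`{"maxUnavailable": "25%"}`), &ru); err != nil {
		panic(err)
	}

	// Resolve the percentage against 8 replicas, rounding up.
	n, err := intstr.GetValueFromIntOrPercent(&ru.MaxUnavailable, 8, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2
}
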
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/json/json.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/json/json.go
new file mode 100644
index 0000000..e8054a1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/json/json.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+)
+
+// NewEncoder delegates to json.NewEncoder
+// It is only here so this package can be a drop-in for common encoding/json uses
+func NewEncoder(w io.Writer) *json.Encoder {
+ return json.NewEncoder(w)
+}
+
+// Marshal delegates to json.Marshal
+// It is only here so this package can be a drop-in for common encoding/json uses
+func Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// Unmarshal unmarshals the given data
+// If v is a *map[string]interface{}, numbers are converted to int64 or float64
+func Unmarshal(data []byte, v interface{}) error {
+ switch v := v.(type) {
+ case *map[string]interface{}:
+ // Build a decoder from the given data
+ decoder := json.NewDecoder(bytes.NewBuffer(data))
+ // Preserve numbers, rather than casting to float64 automatically
+ decoder.UseNumber()
+ // Run the decode
+ if err := decoder.Decode(v); err != nil {
+ return err
+ }
+ // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+ return convertMapNumbers(*v)
+
+ default:
+ return json.Unmarshal(data, v)
+ }
+}
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertMapNumbers(m map[string]interface{}) error {
+ var err error
+ for k, v := range m {
+ switch v := v.(type) {
+ case json.Number:
+ m[k], err = convertNumber(v)
+ case map[string]interface{}:
+ err = convertMapNumbers(v)
+ case []interface{}:
+ err = convertSliceNumbers(v)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertSliceNumbers(s []interface{}) error {
+ var err error
+ for i, v := range s {
+ switch v := v.(type) {
+ case json.Number:
+ s[i], err = convertNumber(v)
+ case map[string]interface{}:
+ err = convertMapNumbers(v)
+ case []interface{}:
+ err = convertSliceNumbers(v)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+ // Attempt to convert to an int64 first
+ if i, err := n.Int64(); err == nil {
+ return i, nil
+ }
+ // Return a float64 (default json.Decode() behavior)
+ // An overflow will return an error
+ return n.Float64()
+}
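
A sketch of the number-preserving Unmarshal wrapper above.

package main

import (
	"fmt"

	utiljson "k8s.io/kubernetes/pkg/util/json"
)

func main() {
	data := []byte(`{"replicas": 3, "ratio": 0.5}`)

	var m map[string]interface{}
	if err := utiljson.Unmarshal(data, &m); err != nil {
		panic(err)
	}

	// Plain encoding/json would give float64 for both; this wrapper keeps
	// whole numbers as int64 and only falls back to float64 when needed.
	fmt.Printf("%T %T\n", m["replicas"], m["ratio"]) // int64 float64
}
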
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go
new file mode 100644
index 0000000..9f64260
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "io"
+ "strings"
+)
+
+// A LineDelimiter is an io.Writer filter that buffers its input and, on Flush, writes each line bracketed by the delimiter.
+type LineDelimiter struct {
+ output io.Writer
+ delimiter []byte
+ buf bytes.Buffer
+}
+
+// NewLineDelimiter allocates a new io.Writer that will split input on lines
+// and bracket each line with the delimiter string. This can be useful in
+// output tests where it is difficult to see and test trailing whitespace.
+func NewLineDelimiter(output io.Writer, delimiter string) *LineDelimiter {
+ return &LineDelimiter{output: output, delimiter: []byte(delimiter)}
+}
+
+// Write writes buf to the LineDelimiter ld. The only errors returned are ones
+// encountered while writing to the underlying output stream.
+func (ld *LineDelimiter) Write(buf []byte) (n int, err error) {
+ return ld.buf.Write(buf)
+}
+
+// Flush writes out all lines buffered up until now, treating the current point of the stream as a line break.
+func (ld *LineDelimiter) Flush() (err error) {
+ lines := strings.Split(ld.buf.String(), "\n")
+ for _, line := range lines {
+ if _, err = ld.output.Write(ld.delimiter); err != nil {
+ return
+ }
+ if _, err = ld.output.Write([]byte(line)); err != nil {
+ return
+ }
+ if _, err = ld.output.Write(ld.delimiter); err != nil {
+ return
+ }
+ if _, err = ld.output.Write([]byte("\n")); err != nil {
+ return
+ }
+ }
+ return
+}
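
A sketch of the LineDelimiter filter above; bracketing each line makes trailing spaces visible.

package main

import (
	"os"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	ld := util.NewLineDelimiter(os.Stdout, "|")
	// Writes are buffered; nothing reaches stdout until Flush.
	ld.Write([]byte("first line  \nsecond line"))
	ld.Flush()
	// Output:
	// |first line  |
	// |second line|
}
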
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/logs.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/logs.go
new file mode 100644
index 0000000..ea27f48
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/logs.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "flag"
+ "log"
+ "time"
+
+ "github.com/golang/glog"
+ "github.com/spf13/pflag"
+ "k8s.io/kubernetes/pkg/util/wait"
+)
+
+var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes")
+
+// TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd.
+func init() {
+ flag.Set("logtostderr", "true")
+}
+
+// GlogWriter serves as a bridge between the standard log package and the glog package.
+type GlogWriter struct{}
+
+// Write implements the io.Writer interface.
+func (writer GlogWriter) Write(data []byte) (n int, err error) {
+ glog.Info(string(data))
+ return len(data), nil
+}
+
+// InitLogs initializes logs the way we want for kubernetes.
+func InitLogs() {
+ log.SetOutput(GlogWriter{})
+ log.SetFlags(0)
+ // The default glog flush interval is 30 seconds, which is frighteningly long.
+ go wait.Until(glog.Flush, *logFlushFreq, wait.NeverStop)
+}
+
+// FlushLogs flushes logs immediately.
+func FlushLogs() {
+ glog.Flush()
+}
+
+// NewLogger creates a new log.Logger which sends logs to glog.Info.
+func NewLogger(prefix string) *log.Logger {
+ return log.New(GlogWriter{}, prefix, 0)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/http.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/http.go
new file mode 100644
index 0000000..582fb9a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/http.go
@@ -0,0 +1,235 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/golang/glog"
+ "golang.org/x/net/http2"
+)
+
+// IsProbableEOF returns true if the given error resembles a connection termination
+// scenario that would justify assuming that the watch is empty.
+// These are the general connection-closure errors that the Go http stack returns
+// to us; they are strongly correlated with the remote side having gone away.
+// Callers that need to treat a probable disconnect differently from other
+// failures should use this method.
+func IsProbableEOF(err error) bool {
+ if uerr, ok := err.(*url.Error); ok {
+ err = uerr.Err
+ }
+ switch {
+ case err == io.EOF:
+ return true
+ case err.Error() == "http: can't write HTTP request on broken connection":
+ return true
+ case strings.Contains(err.Error(), "connection reset by peer"):
+ return true
+ case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"):
+ return true
+ }
+ return false
+}
+
+var defaultTransport = http.DefaultTransport.(*http.Transport)
+
+// SetOldTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
+func SetOldTransportDefaults(t *http.Transport) *http.Transport {
+ if t.Proxy == nil || isDefault(t.Proxy) {
+ // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+ // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+ t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+ }
+ if t.Dial == nil {
+ t.Dial = defaultTransport.Dial
+ }
+ if t.TLSHandshakeTimeout == 0 {
+ t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
+ }
+ return t
+}
+
+// SetTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset
+func SetTransportDefaults(t *http.Transport) *http.Transport {
+ t = SetOldTransportDefaults(t)
+ // Allow clients to disable http2 if needed.
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
+ glog.Infof("HTTP2 has been explicitly disabled")
+ } else {
+ if err := http2.ConfigureTransport(t); err != nil {
+ glog.Warningf("Transport failed http2 configuration: %v", err)
+ }
+ }
+ return t
+}
+
+type RoundTripperWrapper interface {
+ http.RoundTripper
+ WrappedRoundTripper() http.RoundTripper
+}
+
+type DialFunc func(net, addr string) (net.Conn, error)
+
+func Dialer(transport http.RoundTripper) (DialFunc, error) {
+ if transport == nil {
+ return nil, nil
+ }
+
+ switch transport := transport.(type) {
+ case *http.Transport:
+ return transport.Dial, nil
+ case RoundTripperWrapper:
+ return Dialer(transport.WrappedRoundTripper())
+ default:
+ return nil, fmt.Errorf("unknown transport type: %v", transport)
+ }
+}
+
+func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) {
+ if transport == nil {
+ return nil, nil
+ }
+
+ switch transport := transport.(type) {
+ case *http.Transport:
+ return transport.TLSClientConfig, nil
+ case RoundTripperWrapper:
+ return TLSClientConfig(transport.WrappedRoundTripper())
+ default:
+ return nil, fmt.Errorf("unknown transport type: %v", transport)
+ }
+}
+
+func FormatURL(scheme string, host string, port int, path string) *url.URL {
+ return &url.URL{
+ Scheme: scheme,
+ Host: net.JoinHostPort(host, strconv.Itoa(port)),
+ Path: path,
+ }
+}
+
+func GetHTTPClient(req *http.Request) string {
+ if userAgent, ok := req.Header["User-Agent"]; ok {
+ if len(userAgent) > 0 {
+ return userAgent[0]
+ }
+ }
+ return "unknown"
+}
+
+// GetClientIP extracts and returns the client's IP from the given request.
+// It looks at the X-Forwarded-For header, the X-Real-Ip header, and request.RemoteAddr, in that order.
+// It returns nil if none of them are set or if they are set to invalid values.
+func GetClientIP(req *http.Request) net.IP {
+ hdr := req.Header
+ // First check the X-Forwarded-For header for requests via proxy.
+ hdrForwardedFor := hdr.Get("X-Forwarded-For")
+ if hdrForwardedFor != "" {
+ // X-Forwarded-For can be a csv of IPs in case of multiple proxies.
+ // Use the first valid one.
+ parts := strings.Split(hdrForwardedFor, ",")
+ for _, part := range parts {
+ ip := net.ParseIP(strings.TrimSpace(part))
+ if ip != nil {
+ return ip
+ }
+ }
+ }
+
+ // Try the X-Real-Ip header.
+ hdrRealIp := hdr.Get("X-Real-Ip")
+ if hdrRealIp != "" {
+ ip := net.ParseIP(hdrRealIp)
+ if ip != nil {
+ return ip
+ }
+ }
+
+ // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy.
+ // Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err == nil {
+ return net.ParseIP(host)
+ }
+
+ // Fallback if Remote Address was just IP.
+ return net.ParseIP(req.RemoteAddr)
+}
+
+var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment)
+
+// isDefault checks to see if the transportProxierFunc is pointing to the default one
+func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool {
+ transportProxierPointer := fmt.Sprintf("%p", transportProxier)
+ return transportProxierPointer == defaultProxyFuncPointer
+}
+
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+ // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it
+ noProxyEnv := os.Getenv("NO_PROXY")
+ noProxyRules := strings.Split(noProxyEnv, ",")
+
+ cidrs := []*net.IPNet{}
+ for _, noProxyRule := range noProxyRules {
+ _, cidr, _ := net.ParseCIDR(noProxyRule)
+ if cidr != nil {
+ cidrs = append(cidrs, cidr)
+ }
+ }
+
+ if len(cidrs) == 0 {
+ return delegate
+ }
+
+ return func(req *http.Request) (*url.URL, error) {
+ host := req.URL.Host
+ // for some urls, the Host is already the host, not the host:port
+ if net.ParseIP(host) == nil {
+ var err error
+ host, _, err = net.SplitHostPort(req.URL.Host)
+ if err != nil {
+ return delegate(req)
+ }
+ }
+
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return delegate(req)
+ }
+
+ for _, cidr := range cidrs {
+ if cidr.Contains(ip) {
+ return nil, nil
+ }
+ }
+
+ return delegate(req)
+ }
+}
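
A sketch of GetClientIP with a hand-built request; the addresses are documentation examples.

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req.RemoteAddr = "10.0.0.7:52113"
	// Simulate a request that passed through two proxies.
	req.Header.Set("X-Forwarded-For", "203.0.113.9, 10.0.0.1")

	// The first valid X-Forwarded-For entry wins over RemoteAddr.
	fmt.Println(utilnet.GetClientIP(req)) // 203.0.113.9
}
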
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/interface.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/interface.go
new file mode 100644
index 0000000..a1e53d2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/interface.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "bufio"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net"
+ "os"
+
+ "strings"
+
+ "github.com/golang/glog"
+)
+
+type Route struct {
+ Interface string
+ Destination net.IP
+ Gateway net.IP
+ // TODO: add more fields here if needed
+}
+
+func getRoutes(input io.Reader) ([]Route, error) {
+ routes := []Route{}
+ if input == nil {
+ return nil, fmt.Errorf("input is nil")
+ }
+ scanner := bufio.NewReader(input)
+ for {
+ line, err := scanner.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+ //ignore the headers in the route info
+ if strings.HasPrefix(line, "Iface") {
+ continue
+ }
+ fields := strings.Fields(line)
+ routes = append(routes, Route{})
+ route := &routes[len(routes)-1]
+ route.Interface = fields[0]
+ ip, err := parseIP(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ route.Destination = ip
+ ip, err = parseIP(fields[2])
+ if err != nil {
+ return nil, err
+ }
+ route.Gateway = ip
+ }
+ return routes, nil
+}
+
+func parseIP(str string) (net.IP, error) {
+ if str == "" {
+ return nil, fmt.Errorf("input is nil")
+ }
+ bytes, err := hex.DecodeString(str)
+ if err != nil {
+ return nil, err
+ }
+ //TODO add ipv6 support
+ if len(bytes) != net.IPv4len {
+ return nil, fmt.Errorf("only IPv4 is supported")
+ }
+ bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0]
+ return net.IP(bytes), nil
+}
+
+func isInterfaceUp(intf *net.Interface) bool {
+ if intf == nil {
+ return false
+ }
+ if intf.Flags&net.FlagUp != 0 {
+ glog.V(4).Infof("Interface %v is up", intf.Name)
+ return true
+ }
+ return false
+}
+
+// getFinalIP receives all the IP addresses of an interface and returns the
+// first valid IPv4 address found. It returns nil if every address is loopback,
+// IPv6, link-local, or nil.
+func getFinalIP(addrs []net.Addr) (net.IP, error) {
+ if len(addrs) > 0 {
+ for i := range addrs {
+ glog.V(4).Infof("Checking addr %s.", addrs[i].String())
+ ip, _, err := net.ParseCIDR(addrs[i].String())
+ if err != nil {
+ return nil, err
+ }
+ //Only IPv4
+ //TODO : add IPv6 support
+ if ip.To4() != nil {
+ if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() {
+ glog.V(4).Infof("IP found %v", ip)
+ return ip, nil
+ } else {
+ glog.V(4).Infof("Loopback/link-local found %v", ip)
+ }
+ } else {
+ glog.V(4).Infof("%v is not a valid IPv4 address", ip)
+ }
+
+ }
+ }
+ return nil, nil
+}
+
+func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) {
+ intf, err := nw.InterfaceByName(intfName)
+ if err != nil {
+ return nil, err
+ }
+ if isInterfaceUp(intf) {
+ addrs, err := nw.Addrs(intf)
+ if err != nil {
+ return nil, err
+ }
+ glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
+ finalIP, err := getFinalIP(addrs)
+ if err != nil {
+ return nil, err
+ }
+ if finalIP != nil {
+ glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP)
+ return finalIP, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func flagsSet(flags net.Flags, test net.Flags) bool {
+ return flags&test != 0
+}
+
+func flagsClear(flags net.Flags, test net.Flags) bool {
+ return flags&test == 0
+}
+
+func chooseHostInterfaceNativeGo() (net.IP, error) {
+ intfs, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ i := 0
+ var ip net.IP
+ for i = range intfs {
+ if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) {
+ addrs, err := intfs[i].Addrs()
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) > 0 {
+ for _, addr := range addrs {
+ if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil {
+ if addrIP.To4() != nil {
+ ip = addrIP.To4()
+ if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() {
+ break
+ }
+ }
+ }
+ }
+ if ip != nil {
+ // This interface should suffice.
+ break
+ }
+ }
+ }
+ }
+ if ip == nil {
+ return nil, fmt.Errorf("no acceptable interface from host")
+ }
+ glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip)
+ return ip, nil
+}
+
+// ChooseHostInterface fetches an IP for a daemon to use.
+// It uses data from the /proc/net/route file.
+// For a node with no internet connection, it returns an error.
+// For a node with multiple network interfaces, it returns the IP of the interface that has a gateway on it.
+func ChooseHostInterface() (net.IP, error) {
+ inFile, err := os.Open("/proc/net/route")
+ if err != nil {
+ if os.IsNotExist(err) {
+ return chooseHostInterfaceNativeGo()
+ }
+ return nil, err
+ }
+ defer inFile.Close()
+ var nw networkInterfacer = networkInterface{}
+ return chooseHostInterfaceFromRoute(inFile, nw)
+}
+
+type networkInterfacer interface {
+ InterfaceByName(intfName string) (*net.Interface, error)
+ Addrs(intf *net.Interface) ([]net.Addr, error)
+}
+
+type networkInterface struct{}
+
+func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) {
+ intf, err := net.InterfaceByName(intfName)
+ if err != nil {
+ return nil, err
+ }
+ return intf, nil
+}
+
+func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) {
+ addrs, err := intf.Addrs()
+ if err != nil {
+ return nil, err
+ }
+ return addrs, nil
+}
+
+func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) {
+ routes, err := getRoutes(inFile)
+ if err != nil {
+ return nil, err
+ }
+ zero := net.IP{0, 0, 0, 0}
+ var finalIP net.IP
+ for i := range routes {
+ //find interface with gateway
+ if routes[i].Destination.Equal(zero) {
+ glog.V(4).Infof("Default route transits interface %q", routes[i].Interface)
+ finalIP, err := getIPFromInterface(routes[i].Interface, nw)
+ if err != nil {
+ return nil, err
+ }
+ if finalIP != nil {
+ glog.V(4).Infof("Choosing IP %v ", finalIP)
+ return finalIP, nil
+ }
+ }
+ }
+ glog.V(4).Infof("No valid IP found")
+ if finalIP == nil {
+ return nil, fmt.Errorf("Unable to select an IP.")
+ }
+ return nil, nil
+}
+
+// If bind-address is usable, return it directly
+// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default
+// interface.
+func ChooseBindAddress(bindAddress net.IP) (net.IP, error) {
+ if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() {
+ hostIP, err := ChooseHostInterface()
+ if err != nil {
+ return nil, err
+ }
+ bindAddress = hostIP
+ }
+ return bindAddress, nil
+}
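
A minimal sketch of how ChooseBindAddress might be wired into a daemon, assuming this vendored package is importable as k8s.io/kubernetes/pkg/util/net; the flag name and messages are illustrative.

package main

import (
	"flag"
	"fmt"
	"net"

	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	bind := flag.String("bind-address", "0.0.0.0", "IP address to listen on")
	flag.Parse()

	// Unset, 0.0.0.0, and loopback values are treated as "not usable", so
	// ChooseBindAddress falls back to the interface carrying the default route.
	ip, err := utilnet.ChooseBindAddress(net.ParseIP(*bind))
	if err != nil {
		fmt.Println("no usable address:", err)
		return
	}
	fmt.Println("listening on", ip)
}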
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go
new file mode 100644
index 0000000..6afdbf2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// PortRange represents a range of TCP/UDP ports. To represent a single port,
+// set Size to 1.
+type PortRange struct {
+ Base int
+ Size int
+}
+
+// Contains tests whether a given port falls within the PortRange.
+func (pr *PortRange) Contains(p int) bool {
+ return (p >= pr.Base) && ((p - pr.Base) < pr.Size)
+}
+
+// String converts the PortRange to a string representation, which can be
+// parsed by PortRange.Set or ParsePortRange.
+func (pr PortRange) String() string {
+ if pr.Size == 0 {
+ return ""
+ }
+ return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1)
+}
+
+// Set parses a string of the form "min-max", inclusive at both ends, and
+// sets the PortRange from it. This is part of the flag.Value and pflag.Value
+// interfaces.
+func (pr *PortRange) Set(value string) error {
+ value = strings.TrimSpace(value)
+
+ // TODO: Accept "80" syntax
+ // TODO: Accept "80+8" syntax
+
+ if value == "" {
+ pr.Base = 0
+ pr.Size = 0
+ return nil
+ }
+
+ hyphenIndex := strings.Index(value, "-")
+ if hyphenIndex == -1 {
+ return fmt.Errorf("expected hyphen in port range")
+ }
+
+ var err error
+ var low int
+ var high int
+ low, err = strconv.Atoi(value[:hyphenIndex])
+ if err == nil {
+ high, err = strconv.Atoi(value[hyphenIndex+1:])
+ }
+ if err != nil {
+ return fmt.Errorf("unable to parse port range: %s", value)
+ }
+
+ if high < low {
+ return fmt.Errorf("end port cannot be less than start port: %s", value)
+ }
+ pr.Base = low
+ pr.Size = 1 + high - low
+ return nil
+}
+
+// Type returns a descriptive string about this type. This is part of the
+// pflag.Value interface.
+func (*PortRange) Type() string {
+ return "portRange"
+}
+
+// ParsePortRange parses a string of the form "min-max", inclusive at both
+// ends, and initializes a new PortRange from it.
+func ParsePortRange(value string) (*PortRange, error) {
+ pr := &PortRange{}
+ err := pr.Set(value)
+ if err != nil {
+ return nil, err
+ }
+ return pr, nil
+}
+
+func ParsePortRangeOrDie(value string) *PortRange {
+ pr, err := ParsePortRange(value)
+ if err != nil {
+ panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err))
+ }
+ return pr
+}
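
A small usage sketch of PortRange parsing and membership checks, assuming the vendored import path k8s.io/kubernetes/pkg/util/net; the range value is just an example.

package main

import (
	"fmt"

	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	pr, err := utilnet.ParsePortRange("30000-32767")
	if err != nil {
		panic(err)
	}
	fmt.Println(pr.Contains(31000)) // true
	fmt.Println(pr.Contains(8080))  // false
	fmt.Println(pr.String())        // "30000-32767"
}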
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go
new file mode 100644
index 0000000..29c985e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "strings"
+
+ "k8s.io/kubernetes/pkg/util/sets"
+)
+
+var validSchemes = sets.NewString("http", "https", "")
+
+// SplitSchemeNamePort takes a string of the following forms:
+// * "<name>", returns "", "<name>","", true
+// * "<name>:<port>", returns "", "<name>","<port>",true
+// * "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
+//
+// Name must be non-empty, otherwise valid is returned as false.
+// Scheme must be "http" or "https" if specified.
+// Port is returned as a string, and it is not required to be numeric (could be
+// used for a named port, for example).
+func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) {
+ parts := strings.Split(id, ":")
+ switch len(parts) {
+ case 1:
+ name = parts[0]
+ case 2:
+ name = parts[0]
+ port = parts[1]
+ case 3:
+ scheme = parts[0]
+ name = parts[1]
+ port = parts[2]
+ default:
+ return "", "", "", false
+ }
+
+ if len(name) > 0 && validSchemes.Has(scheme) {
+ return scheme, name, port, true
+ } else {
+ return "", "", "", false
+ }
+}
+
+// JoinSchemeNamePort returns a string that specifies the scheme, name, and port:
+// * "<name>"
+// * "<name>:<port>"
+// * "<scheme>:<name>:<port>"
+// None of the parameters may contain a ':' character
+// Name is required
+// Scheme must be "", "http", or "https"
+func JoinSchemeNamePort(scheme, name, port string) string {
+ if len(scheme) > 0 {
+ // Must include three segments to specify scheme
+ return scheme + ":" + name + ":" + port
+ }
+ if len(port) > 0 {
+ // Must include two segments to specify port
+ return name + ":" + port
+ }
+ // Return name alone
+ return name
+}
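
A brief sketch of splitting and re-joining a scheme:name:port identifier with the two helpers above (vendored import path assumed; the etcd values are illustrative).

package main

import (
	"fmt"

	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:etcd:2379")
	fmt.Println(scheme, name, port, valid) // https etcd 2379 true

	// JoinSchemeNamePort reverses the split.
	fmt.Println(utilnet.JoinSchemeNamePort(scheme, name, port)) // https:etcd:2379
}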
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md
new file mode 100644
index 0000000..b0f238a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md
@@ -0,0 +1,17 @@
+This package contains hand-coded set implementations that should be similar to
+the autogenerated ones in `pkg/util/sets`.
+
+We can't simply use net.IPNet as a map-key in Go (because it contains a
+`[]byte`).
+
+We could use the same workaround we use here (a string representation as the
+key) to autogenerate sets. If we do that, or decide on an alternate approach,
+we should replace the implementations in this package with the autogenerated
+versions.
+
+It is expected that callers will alias this import as `netsets`
+i.e. `import netsets "k8s.io/kubernetes/pkg/util/net/sets"`
+
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/pkg/util/net/sets/README.md?pixel)]()
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go
new file mode 100644
index 0000000..5b6fe93
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+import (
+ "net"
+ "strings"
+)
+
+type IPNet map[string]*net.IPNet
+
+func ParseIPNets(specs ...string) (IPNet, error) {
+ ipnetset := make(IPNet)
+ for _, spec := range specs {
+ spec = strings.TrimSpace(spec)
+ _, ipnet, err := net.ParseCIDR(spec)
+ if err != nil {
+ return nil, err
+ }
+ k := ipnet.String() // In case of normalization
+ ipnetset[k] = ipnet
+ }
+ return ipnetset, nil
+}
+
+// Insert adds items to the set.
+func (s IPNet) Insert(items ...*net.IPNet) {
+ for _, item := range items {
+ s[item.String()] = item
+ }
+}
+
+// Delete removes all items from the set.
+func (s IPNet) Delete(items ...*net.IPNet) {
+ for _, item := range items {
+ delete(s, item.String())
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s IPNet) Has(item *net.IPNet) bool {
+ _, contained := s[item.String()]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s IPNet) HasAll(items ...*net.IPNet) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s IPNet) Difference(s2 IPNet) IPNet {
+ result := make(IPNet)
+ for k, i := range s {
+ _, found := s2[k]
+ if found {
+ continue
+ }
+ result[k] = i
+ }
+ return result
+}
+
+// StringSlice returns a []string with the String representation of each element in the set.
+// Order is undefined.
+func (s IPNet) StringSlice() []string {
+ a := make([]string, 0, len(s))
+ for k := range s {
+ a = append(a, k)
+ }
+ return a
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 IPNet) IsSuperset(s2 IPNet) bool {
+ for k := range s2 {
+ _, found := s1[k]
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 IPNet) Equal(s2 IPNet) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+// Len returns the size of the set.
+func (s IPNet) Len() int {
+ return len(s)
+}
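
A minimal sketch of the IPNet set in use, following the aliasing convention from the README above; the CIDR values are examples.

package main

import (
	"fmt"
	"net"

	netsets "k8s.io/kubernetes/pkg/util/net/sets"
)

func main() {
	allowed, err := netsets.ParseIPNets("10.0.0.0/8", "192.168.0.0/16")
	if err != nil {
		panic(err)
	}

	_, probe, _ := net.ParseCIDR("10.0.0.0/8")
	fmt.Println(allowed.Has(probe))    // true
	fmt.Println(allowed.StringSlice()) // both CIDRs, order undefined
}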
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/util.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/util.go
new file mode 100644
index 0000000..1348f4d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/net/util.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+ "net"
+ "reflect"
+)
+
+// IPNetEqual checks if the two input IPNets are representing the same subnet.
+// For example,
+// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet.
+// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet.
+func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool {
+ if ipnet1 == nil || ipnet2 == nil {
+ return false
+ }
+ if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) {
+ return true
+ }
+ return false
+}
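
A short sketch showing the comparisons the IPNetEqual doc comment describes (vendored import path assumed).

package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	_, a, _ := net.ParseCIDR("10.0.0.1/24")
	_, b, _ := net.ParseCIDR("10.0.0.0/24")
	_, c, _ := net.ParseCIDR("10.0.0.0/25")

	fmt.Println(utilnet.IPNetEqual(a, b)) // true: same mask, same network
	fmt.Println(utilnet.IPNetEqual(a, c)) // false: masks differ
}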
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go
new file mode 100644
index 0000000..4e70cc6
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package parsers
+
+import (
+ "fmt"
+
+ dockerref "github.com/docker/distribution/reference"
+)
+
+const (
+ DefaultImageTag = "latest"
+)
+
+// ParseImageName parses a docker image string into three parts: repo, tag and digest.
+// If both tag and digest are empty, a default image tag will be returned.
+func ParseImageName(image string) (string, string, string, error) {
+ named, err := dockerref.ParseNamed(image)
+ if err != nil {
+ return "", "", "", fmt.Errorf("couldn't parse image name: %v", err)
+ }
+
+ repoToPull := named.Name()
+ var tag, digest string
+
+ tagged, ok := named.(dockerref.Tagged)
+ if ok {
+ tag = tagged.Tag()
+ }
+
+ digested, ok := named.(dockerref.Digested)
+ if ok {
+ digest = digested.Digest().String()
+ }
+ // If no tag was specified, use the default "latest".
+ if len(tag) == 0 && len(digest) == 0 {
+ tag = DefaultImageTag
+ }
+ return repoToPull, tag, digest, nil
+}
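
A usage sketch for ParseImageName, assuming the vendored import path; the exact repo string returned for a bare name depends on the vendored docker/distribution reference parser, so it is not asserted here.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/parsers"
)

func main() {
	repo, tag, digest, err := parsers.ParseImageName("nginx")
	if err != nil {
		panic(err)
	}
	// No tag or digest was given, so tag falls back to "latest".
	fmt.Println(repo, tag, digest)
}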
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go
new file mode 100644
index 0000000..134c152
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rand provides utilities related to randomization.
+package rand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
+var numLetters = len(letters)
+var rng = struct {
+ sync.Mutex
+ rand *rand.Rand
+}{
+ rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
+}
+
+// Intn generates an integer in range [0,max).
+// By design this panics if the input is invalid (max <= 0).
+func Intn(max int) int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Intn(max)
+}
+
+// IntnRange generates an integer in range [min,max).
+// By design this panics if the range is invalid (max <= min).
+func IntnRange(min, max int) int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Intn(max-min) + min
+}
+
+// Int63nRange generates an int64 integer in range [min,max).
+// By design this panics if the range is invalid (max <= min).
+func Int63nRange(min, max int64) int64 {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Int63n(max-min) + min
+}
+
+// Seed seeds the rng with the provided seed.
+func Seed(seed int64) {
+ rng.Lock()
+ defer rng.Unlock()
+
+ rng.rand = rand.New(rand.NewSource(seed))
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
+// from the default Source.
+func Perm(n int) []int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Perm(n)
+}
+
+// String generates a random alphanumeric string n characters long. This will
+// panic if n is less than zero.
+func String(length int) string {
+ b := make([]rune, length)
+ for i := range b {
+ b[i] = letters[Intn(numLetters)]
+ }
+ return string(b)
+}
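
A minimal sketch of the rand helpers (vendored import path assumed); the lengths and bounds are examples.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/rand"
)

func main() {
	// A 5-character lowercase alphanumeric suffix, e.g. for generated names.
	fmt.Println(rand.String(5))

	// A pseudo-random int in [0, 100); panics if the argument is <= 0.
	fmt.Println(rand.Intn(100))
}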
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go
new file mode 100644
index 0000000..a844e4c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go
@@ -0,0 +1,49 @@
+// +build linux
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+// RunInResourceContainer creates the resource-only container containerName if it
+// does not already exist and moves the current process into it.
+//
+// containerName must be an absolute container name.
+func RunInResourceContainer(containerName string) error {
+ manager := fs.Manager{
+ Cgroups: &configs.Cgroup{
+ Parent: "/",
+ Name: containerName,
+ Resources: &configs.Resources{
+ AllowAllDevices: true,
+ },
+ },
+ }
+
+ return manager.Apply(os.Getpid())
+}
+
+func ApplyRLimitForSelf(maxOpenFiles uint64) {
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{Max: maxOpenFiles, Cur: maxOpenFiles})
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go
new file mode 100644
index 0000000..ba861b0
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go
@@ -0,0 +1,31 @@
+// +build !linux
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+)
+
+func RunInResourceContainer(containerName string) error {
+ return errors.New("resource-only containers unsupported in this platform")
+}
+
+func ApplyRLimitForSelf(maxOpenFiles uint64) error {
+ return errors.New("SetRLimit unsupported in this platform")
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runner.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runner.go
new file mode 100644
index 0000000..9e977ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runner.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "sync"
+)
+
+// Runner is an abstraction to make it easy to start and stop groups of things that can be
+// described by a single function which waits on a channel close to exit.
+type Runner struct {
+ lock sync.Mutex
+ loopFuncs []func(stop chan struct{})
+ stop *chan struct{}
+}
+
+// NewRunner makes a runner for the given function(s). The function(s) should loop until
+// the channel is closed.
+func NewRunner(f ...func(stop chan struct{})) *Runner {
+ return &Runner{loopFuncs: f}
+}
+
+// Start begins running.
+func (r *Runner) Start() {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if r.stop == nil {
+ c := make(chan struct{})
+ r.stop = &c
+ for i := range r.loopFuncs {
+ go r.loopFuncs[i](*r.stop)
+ }
+ }
+}
+
+// Stop stops running.
+func (r *Runner) Stop() {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if r.stop != nil {
+ close(*r.stop)
+ r.stop = nil
+ }
+}
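
A sketch of the Runner lifecycle: loop functions receive the shared stop channel and return when it is closed. The worker body and durations here are illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func worker(stop chan struct{}) {
	for {
		select {
		case <-stop:
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println("tick")
		}
	}
}

func main() {
	r := util.NewRunner(worker)
	r.Start()                          // launches worker in its own goroutine
	time.Sleep(350 * time.Millisecond) // let it run for a few ticks
	r.Stop()                           // closes the shared stop channel; worker returns
}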
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go
new file mode 100644
index 0000000..464d3ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "fmt"
+ "github.com/golang/glog"
+ "runtime"
+)
+
+// For testing, bypass HandleCrash.
+var ReallyCrash bool
+
+// PanicHandlers is a list of functions which will be invoked when a panic happens.
+var PanicHandlers = []func(interface{}){logPanic}
+
+//TODO search the public functions
+// HandleCrash simply catches a crash and logs an error. Meant to be called via defer.
+// Additional context-specific handlers can be provided and will be called in case of a panic.
+func HandleCrash(additionalHandlers ...func(interface{})) {
+ if ReallyCrash {
+ return
+ }
+ if r := recover(); r != nil {
+ for _, fn := range PanicHandlers {
+ fn(r)
+ }
+ for _, fn := range additionalHandlers {
+ fn(r)
+ }
+ }
+}
+
+// logPanic logs the caller tree when a panic occurs.
+func logPanic(r interface{}) {
+ callers := ""
+ for i := 0; true; i++ {
+ _, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ callers = callers + fmt.Sprintf("%v:%v\n", file, line)
+ }
+ glog.Errorf("Recovered from panic: %#v (%v)\n%v", r, r, callers)
+}
+
+// ErrorHandlers is a list of functions which will be invoked when an unreturnable
+// error occurs.
+var ErrorHandlers = []func(error){logError}
+
+// HandleError is a method to invoke when a non-user facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this method
+// is preferable to logging the error - the default behavior is to log but the
+// errors may be sent to a remote server for analysis.
+func HandleError(err error) {
+ // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
+ if err == nil {
+ return
+ }
+
+ for _, fn := range ErrorHandlers {
+ fn(err)
+ }
+}
+
+// logError prints an error with the call stack of the location it was reported from.
+func logError(err error) {
+ glog.ErrorDepth(2, err)
+}
+
+// GetCaller returns the caller of the function that calls it.
+func GetCaller() string {
+ var pc [1]uintptr
+ runtime.Callers(3, pc[:])
+ f := runtime.FuncForPC(pc[0])
+ if f == nil {
+ return fmt.Sprintf("Unable to find caller")
+ }
+ return f.Name()
+}
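
A sketch of the deferred-HandleCrash pattern the comment above describes: the panic is recovered and logged (via glog) rather than crashing the process. Function names are illustrative.

package main

import (
	"fmt"

	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)

func riskyWork() {
	// Deferred so a panic in this function is recovered by HandleCrash.
	defer utilruntime.HandleCrash()
	panic("something unexpected")
}

func main() {
	riskyWork()
	fmt.Println("still running")
}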
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go
new file mode 100644
index 0000000..45f5d4f
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
+type Byte map[byte]Empty
+
+// NewByte creates a Byte from a list of values.
+func NewByte(items ...byte) Byte {
+ ss := Byte{}
+ ss.Insert(items...)
+ return ss
+}
+
+// ByteKeySet creates a Byte from the keys of a map[byte](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func ByteKeySet(theMap interface{}) Byte {
+ v := reflect.ValueOf(theMap)
+ ret := Byte{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(byte))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Byte) Insert(items ...byte) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes all items from the set.
+func (s Byte) Delete(items ...byte) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Byte) Has(item byte) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Byte) HasAll(items ...byte) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Byte) HasAny(items ...byte) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Byte) Difference(s2 Byte) Byte {
+ result := NewByte()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Byte) Union(s2 Byte) Byte {
+ result := NewByte()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Byte) Intersection(s2 Byte) Byte {
+ var walk, other Byte
+ result := NewByte()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Byte) IsSuperset(s2 Byte) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Byte) Equal(s2 Byte) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfByte []byte
+
+func (s sortableSliceOfByte) Len() int { return len(s) }
+func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) }
+func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted byte slice.
+func (s Byte) List() []byte {
+ res := make(sortableSliceOfByte, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []byte(res)
+}
+
+// Returns a single element from the set.
+func (s Byte) PopAny() (byte, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue byte
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Byte) Len() int {
+ return len(s)
+}
+
+func lessByte(lhs, rhs byte) bool {
+ return lhs < rhs
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go
new file mode 100644
index 0000000..c5e5416
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+// Package sets has auto-generated set types.
+package sets
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go
new file mode 100644
index 0000000..5654edd
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int.go
new file mode 100644
index 0000000..4b8c331
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
+type Int map[int]Empty
+
+// NewInt creates an Int from a list of values.
+func NewInt(items ...int) Int {
+ ss := Int{}
+ ss.Insert(items...)
+ return ss
+}
+
+// IntKeySet creates an Int from the keys of a map[int](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func IntKeySet(theMap interface{}) Int {
+ v := reflect.ValueOf(theMap)
+ ret := Int{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(int))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Int) Insert(items ...int) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes all items from the set.
+func (s Int) Delete(items ...int) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int) Has(item int) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int) HasAll(items ...int) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int) HasAny(items ...int) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int) Difference(s2 Int) Int {
+ result := NewInt()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int) Union(s2 Int) Int {
+ result := NewInt()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int) Intersection(s2 Int) Int {
+ var walk, other Int
+ result := NewInt()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int) IsSuperset(s2 Int) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int) Equal(s2 Int) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt []int
+
+func (s sortableSliceOfInt) Len() int { return len(s) }
+func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) }
+func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int slice.
+func (s Int) List() []int {
+ res := make(sortableSliceOfInt, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []int(res)
+}
+
+// Returns a single element from the set.
+func (s Int) PopAny() (int, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue int
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int) Len() int {
+ return len(s)
+}
+
+func lessInt(lhs, rhs int) bool {
+ return lhs < rhs
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go
new file mode 100644
index 0000000..b6a97e7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
+type Int64 map[int64]Empty
+
+// NewInt64 creates an Int64 from a list of values.
+func NewInt64(items ...int64) Int64 {
+ ss := Int64{}
+ ss.Insert(items...)
+ return ss
+}
+
+// Int64KeySet creates an Int64 from the keys of a map[int64](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int64KeySet(theMap interface{}) Int64 {
+ v := reflect.ValueOf(theMap)
+ ret := Int64{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(int64))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Int64) Insert(items ...int64) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes all items from the set.
+func (s Int64) Delete(items ...int64) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int64) Has(item int64) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int64) HasAll(items ...int64) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int64) HasAny(items ...int64) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int64) Difference(s2 Int64) Int64 {
+ result := NewInt64()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int64) Union(s2 Int64) Int64 {
+ result := NewInt64()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int64) Intersection(s2 Int64) Int64 {
+ var walk, other Int64
+ result := NewInt64()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int64) IsSuperset(s2 Int64) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int64) Equal(s2 Int64) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt64 []int64
+
+func (s sortableSliceOfInt64) Len() int { return len(s) }
+func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
+func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int64 slice.
+func (s Int64) List() []int64 {
+ res := make(sortableSliceOfInt64, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []int64(res)
+}
+
+// Returns a single element from the set.
+func (s Int64) PopAny() (int64, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue int64
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int64) Len() int {
+ return len(s)
+}
+
+func lessInt64(lhs, rhs int64) bool {
+ return lhs < rhs
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/string.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/string.go
new file mode 100644
index 0000000..2094b32
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/sets/string.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by set-gen. Do not edit it manually!
+
+package sets
+
+import (
+ "reflect"
+ "sort"
+)
+
+// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
+type String map[string]Empty
+
+// NewString creates a String from a list of values.
+func NewString(items ...string) String {
+ ss := String{}
+ ss.Insert(items...)
+ return ss
+}
+
+// StringKeySet creates a String from the keys of a map[string](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func StringKeySet(theMap interface{}) String {
+ v := reflect.ValueOf(theMap)
+ ret := String{}
+
+ for _, keyValue := range v.MapKeys() {
+ ret.Insert(keyValue.Interface().(string))
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s String) Insert(items ...string) {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+}
+
+// Delete removes all items from the set.
+func (s String) Delete(items ...string) {
+ for _, item := range items {
+ delete(s, item)
+ }
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s String) Has(item string) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s String) HasAll(items ...string) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s String) HasAny(items ...string) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s String) Difference(s2 String) String {
+ result := NewString()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 String) Union(s2 String) String {
+ result := NewString()
+ for key := range s1 {
+ result.Insert(key)
+ }
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 String) Intersection(s2 String) String {
+ var walk, other String
+ result := NewString()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 String) IsSuperset(s2 String) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 String) Equal(s2 String) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfString []string
+
+func (s sortableSliceOfString) Len() int { return len(s) }
+func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) }
+func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted string slice.
+func (s String) List() []string {
+ res := make(sortableSliceOfString, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return []string(res)
+}
+
+// Returns a single element from the set.
+func (s String) PopAny() (string, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue string
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s String) Len() int {
+ return len(s)
+}
+
+func lessString(lhs, rhs string) bool {
+ return lhs < rhs
+}
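
A compact sketch of the generated String set operations (the Byte, Int, and Int64 variants behave identically); the set members are examples.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	a := sets.NewString("http", "https")
	b := sets.NewString("https", "grpc")

	fmt.Println(a.Has("http"))            // true
	fmt.Println(a.Intersection(b).List()) // [https]
	fmt.Println(a.Union(b).List())        // [grpc http https]
	fmt.Println(a.Difference(b).List())   // [http]
}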
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/string_flag.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/string_flag.go
new file mode 100644
index 0000000..9d6a00a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/string_flag.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not.
+type StringFlag struct {
+ // If Set has been invoked this value is true
+ provided bool
+ // The exact value provided on the flag
+ value string
+}
+
+func NewStringFlag(defaultVal string) StringFlag {
+ return StringFlag{value: defaultVal}
+}
+
+func (f *StringFlag) Default(value string) {
+ f.value = value
+}
+
+func (f StringFlag) String() string {
+ return f.value
+}
+
+func (f StringFlag) Value() string {
+ return f.value
+}
+
+func (f *StringFlag) Set(value string) error {
+ f.value = value
+ f.provided = true
+
+ return nil
+}
+
+func (f StringFlag) Provided() bool {
+ return f.provided
+}
+
+func (f *StringFlag) Type() string {
+ return "string"
+}
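
A sketch of wiring StringFlag into the standard flag package; since *StringFlag provides String and Set it satisfies flag.Value, and Provided reports whether the flag was actually passed. The flag name is illustrative.

package main

import (
	"flag"
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	cfg := util.NewStringFlag("default.conf")
	// *StringFlag satisfies flag.Value, so it can back a flag directly.
	flag.Var(&cfg, "config", "path to the config file")
	flag.Parse()

	fmt.Println(cfg.Value(), cfg.Provided())
}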
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/template.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/template.go
new file mode 100644
index 0000000..d09d7dc
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/template.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "go/doc"
+ "io"
+ "strings"
+ "text/template"
+)
+
+func wrap(indent string, s string) string {
+ var buf bytes.Buffer
+ doc.ToText(&buf, s, indent, indent+" ", 80-len(indent))
+ return buf.String()
+}
+
+// ExecuteTemplate executes templateText with data and output written to w.
+func ExecuteTemplate(w io.Writer, templateText string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(template.FuncMap{
+ "trim": strings.TrimSpace,
+ "wrap": wrap,
+ })
+ template.Must(t.Parse(templateText))
+ return t.Execute(w, data)
+}
+
+func ExecuteTemplateToString(templateText string, data interface{}) (string, error) {
+ b := bytes.Buffer{}
+ err := ExecuteTemplate(&b, templateText, data)
+ return b.String(), err
+}
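
A sketch of ExecuteTemplateToString using the two registered helpers, trim and wrap; the template text and data are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	const tmpl = `Hello, {{trim .Name}}!
{{wrap "  " .Blurb}}`

	out, err := util.ExecuteTemplateToString(tmpl, struct{ Name, Blurb string }{
		Name:  "  kube2msb  ",
		Blurb: "A long description that the wrap helper re-flows to roughly 80 columns with a two-space indent.",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}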
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/trace.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/trace.go
new file mode 100644
index 0000000..fe93db8
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/trace.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/golang/glog"
+)
+
+type traceStep struct {
+ stepTime time.Time
+ msg string
+}
+
+type Trace struct {
+ name string
+ startTime time.Time
+ steps []traceStep
+}
+
+func NewTrace(name string) *Trace {
+ return &Trace{name, time.Now(), nil}
+}
+
+func (t *Trace) Step(msg string) {
+ if t.steps == nil {
+ // traces almost always have less than 6 steps, do this to avoid more than a single allocation
+ t.steps = make([]traceStep, 0, 6)
+ }
+ t.steps = append(t.steps, traceStep{time.Now(), msg})
+}
+
+func (t *Trace) Log() {
+ endTime := time.Now()
+ var buffer bytes.Buffer
+
+ buffer.WriteString(fmt.Sprintf("Trace %q (started %v):\n", t.name, t.startTime))
+ lastStepTime := t.startTime
+ for _, step := range t.steps {
+ buffer.WriteString(fmt.Sprintf("[%v] [%v] %v\n", step.stepTime.Sub(t.startTime), step.stepTime.Sub(lastStepTime), step.msg))
+ lastStepTime = step.stepTime
+ }
+ buffer.WriteString(fmt.Sprintf("[%v] [%v] END\n", endTime.Sub(t.startTime), endTime.Sub(lastStepTime)))
+ glog.Info(buffer.String())
+}
+
+func (t *Trace) LogIfLong(threshold time.Duration) {
+ if time.Since(t.startTime) >= threshold {
+ t.Log()
+ }
+}
+
+func (t *Trace) TotalTime() time.Duration {
+ return time.Since(t.startTime)
+}
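
A sketch of the Trace API: record named steps and only log the timing breakdown (via glog) when the whole operation exceeds a threshold. The step names and durations are illustrative.

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	t := util.NewTrace("sync services")
	t.Step("listed services")
	time.Sleep(20 * time.Millisecond)
	t.Step("pushed registrations")
	// Emit the trace only if the whole operation took at least 10ms.
	t.LogIfLong(10 * time.Millisecond)
}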
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask.go
new file mode 100644
index 0000000..35ccce5
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "syscall"
+)
+
+func Umask(mask int) (old int, err error) {
+ return syscall.Umask(mask), nil
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go
new file mode 100644
index 0000000..8c1b2cb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go
@@ -0,0 +1,27 @@
+// +build windows
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+)
+
+func Umask(mask int) (old int, err error) {
+ return 0, errors.New("platform and architecture is not supported")
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/util.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/util.go
new file mode 100644
index 0000000..7a94149
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/util.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "regexp"
+)
+
+// CompileRegexps takes a list of strings and compiles them into a list of regular expressions.
+func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+ regexps := []*regexp.Regexp{}
+ for _, regexpStr := range regexpStrings {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return []*regexp.Regexp{}, err
+ }
+ regexps = append(regexps, r)
+ }
+ return regexps, nil
+}
+
+// UsingSystemdInitSystem detects whether systemd is the init system.
+// Please note that simply reading /proc/1/cmdline can be misleading because
+// some installations of various init programs automatically make /sbin/init
+// a symlink or even a renamed copy of their main program.
+// TODO(dchen1107): reliably detect the init system in use on the system:
+// systemd, upstart, initd, etc.
+func UsingSystemdInitSystem() bool {
+ if _, err := os.Stat("/run/systemd/system"); err == nil {
+ return true
+ }
+
+ return false
+}
+
+// Tests whether all pointer fields in a struct are nil. This is useful when,
+// for example, an API struct is handled by plugins which need to distinguish
+// "no plugin accepted this spec" from "this spec is empty".
+//
+// This function is only valid for structs and pointers to structs. Any other
+// type will cause a panic. Passing a typed nil pointer will return true.
+func AllPtrFieldsNil(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ if !v.IsValid() {
+ panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj))
+ }
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return true
+ }
+ v = v.Elem()
+ }
+ for i := 0; i < v.NumField(); i++ {
+ if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
+ return false
+ }
+ }
+ return true
+}
+
+func FileExists(filename string) (bool, error) {
+ if _, err := os.Stat(filename); os.IsNotExist(err) {
+ return false, nil
+ } else if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// ReadDirNoExit reads the directory named by dirname and returns a list of
+// directory entries together with any lstat errors, instead of aborting on
+// the first error (borrowed from ioutil.ReadDir).
+func ReadDirNoExit(dirname string) ([]os.FileInfo, []error, error) {
+ if dirname == "" {
+ dirname = "."
+ }
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer f.Close()
+
+ names, err := f.Readdirnames(-1)
+ list := make([]os.FileInfo, 0, len(names))
+ errs := make([]error, 0, len(names))
+ for _, filename := range names {
+ fip, lerr := os.Lstat(dirname + "/" + filename)
+ if os.IsNotExist(lerr) {
+ // File disappeared between readdir + stat.
+ // Just treat it as if it didn't exist.
+ continue
+ }
+
+ list = append(list, fip)
+ errs = append(errs, lerr)
+ }
+
+ return list, errs, nil
+}
+
+// IntPtr returns a pointer to an int
+func IntPtr(i int) *int {
+ o := i
+ return &o
+}
+
+// Int32Ptr returns a pointer to an int32
+func Int32Ptr(i int32) *int32 {
+ o := i
+ return &o
+}
+
+// IntPtrDerefOr dereferences the int pointer and returns its value if it is
+// not nil, else returns def.
+func IntPtrDerefOr(ptr *int, def int) int {
+ if ptr != nil {
+ return *ptr
+ }
+ return def
+}
+
+// Int32PtrDerefOr dereferences the int32 pointer and returns its value if it
+// is not nil, else returns def.
+func Int32PtrDerefOr(ptr *int32, def int32) int32 {
+ if ptr != nil {
+ return *ptr
+ }
+ return def
+}
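
For orientation, a minimal sketch of the pointer helpers above, assuming the package is imported by its vendored path k8s.io/kubernetes/pkg/util; the spec struct is purely illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

// spec is a hypothetical API-style struct whose fields are all pointers.
type spec struct {
	Replicas *int32
	Paused   *bool
}

func main() {
	empty := &spec{}
	filled := &spec{Replicas: util.Int32Ptr(3)}

	// AllPtrFieldsNil distinguishes "nothing set" from "something set".
	fmt.Println(util.AllPtrFieldsNil(empty))  // true
	fmt.Println(util.AllPtrFieldsNil(filled)) // false

	// Int32PtrDerefOr falls back to the default when the pointer is nil.
	fmt.Println(util.Int32PtrDerefOr(filled.Replicas, 1)) // 3
	fmt.Println(util.Int32PtrDerefOr(empty.Replicas, 1))  // 1
}
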
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/uuid.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/uuid.go
new file mode 100644
index 0000000..23abe11
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/uuid.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "sync"
+
+ "github.com/pborman/uuid"
+ "k8s.io/kubernetes/pkg/types"
+)
+
+var uuidLock sync.Mutex
+var lastUUID uuid.UUID
+
+func NewUUID() types.UID {
+ uuidLock.Lock()
+ defer uuidLock.Unlock()
+ result := uuid.NewUUID()
+ // The uuid package is naive and can generate identical UUIDs if
+ // successive calls land within the same clock tick.
+ // The UUID timestamp advances in 100 ns increments, so it is cheap to
+ // actively wait for a new value.
+ for uuid.Equal(lastUUID, result) {
+ result = uuid.NewUUID()
+ }
+ lastUUID = result
+ return types.UID(result.String())
+}
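
A small usage sketch of NewUUID; because of the lock and retry loop above, back-to-back calls always yield distinct UIDs.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	a := util.NewUUID()
	b := util.NewUUID()
	// Two time-based UUID strings, guaranteed distinct even within one tick.
	fmt.Println(a, b, a != b)
}
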
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go
new file mode 100644
index 0000000..b4a6c5c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ utilerrors "k8s.io/kubernetes/pkg/util/errors"
+)
+
+// Error is an implementation of the 'error' interface, which represents a
+// field-level validation error.
+type Error struct {
+ Type ErrorType
+ Field string
+ BadValue interface{}
+ Detail string
+}
+
+var _ error = &Error{}
+
+// Error implements the error interface.
+func (v *Error) Error() string {
+ return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+}
+
+// ErrorBody returns the error message without the field name. This is useful
+// for building nice-looking higher-level error reporting.
+func (v *Error) ErrorBody() string {
+ var s string
+ switch v.Type {
+ case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
+ s = fmt.Sprintf("%s", v.Type)
+ default:
+ var bad string
+ badBytes, err := json.Marshal(v.BadValue)
+ if err != nil {
+ bad = err.Error()
+ } else {
+ bad = string(badBytes)
+ }
+ s = fmt.Sprintf("%s: %s", v.Type, bad)
+ }
+ if len(v.Detail) != 0 {
+ s += fmt.Sprintf(": %s", v.Detail)
+ }
+ return s
+}
+
+// ErrorType is a machine-readable value providing more detail about why
+// a field is invalid. These values are expected to match 1-1 with
+// CauseType in api/types.go.
+type ErrorType string
+
+// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it.
+const (
+ // ErrorTypeNotFound is used to report failure to find a requested value
+ // (e.g. looking up an ID). See NotFound().
+ ErrorTypeNotFound ErrorType = "FieldValueNotFound"
+ // ErrorTypeRequired is used to report required values that are not
+ // provided (e.g. empty strings, null values, or empty arrays). See
+ // Required().
+ ErrorTypeRequired ErrorType = "FieldValueRequired"
+ // ErrorTypeDuplicate is used to report collisions of values that must be
+ // unique (e.g. unique IDs). See Duplicate().
+ ErrorTypeDuplicate ErrorType = "FieldValueDuplicate"
+ // ErrorTypeInvalid is used to report malformed values (e.g. failed regex
+ // match, too long, out of bounds). See Invalid().
+ ErrorTypeInvalid ErrorType = "FieldValueInvalid"
+ // ErrorTypeNotSupported is used to report unknown values for enumerated
+ // fields (e.g. a list of valid values). See NotSupported().
+ ErrorTypeNotSupported ErrorType = "FieldValueNotSupported"
+ // ErrorTypeForbidden is used to report valid (as per formatting rules)
+ // values which would be accepted under some conditions, but which are not
+ // permitted by the current conditions (such as security policy). See
+ // Forbidden().
+ ErrorTypeForbidden ErrorType = "FieldValueForbidden"
+ // ErrorTypeTooLong is used to report that the given value is too long.
+ // This is similar to ErrorTypeInvalid, but the error will not include the
+ // too-long value. See TooLong().
+ ErrorTypeTooLong ErrorType = "FieldValueTooLong"
+ // ErrorTypeInternal is used to report other errors that are not related
+ // to user input. See InternalError().
+ ErrorTypeInternal ErrorType = "InternalError"
+)
+
+// String converts an ErrorType into its corresponding canonical error message.
+func (t ErrorType) String() string {
+ switch t {
+ case ErrorTypeNotFound:
+ return "Not found"
+ case ErrorTypeRequired:
+ return "Required value"
+ case ErrorTypeDuplicate:
+ return "Duplicate value"
+ case ErrorTypeInvalid:
+ return "Invalid value"
+ case ErrorTypeNotSupported:
+ return "Unsupported value"
+ case ErrorTypeForbidden:
+ return "Forbidden"
+ case ErrorTypeTooLong:
+ return "Too long"
+ case ErrorTypeInternal:
+ return "Internal error"
+ default:
+ panic(fmt.Sprintf("unrecognized validation error: %q", string(t)))
+ }
+}
+
+// NotFound returns a *Error indicating "value not found". This is
+// used to report failure to find a requested value (e.g. looking up an ID).
+func NotFound(field *Path, value interface{}) *Error {
+ return &Error{ErrorTypeNotFound, field.String(), value, ""}
+}
+
+// Required returns a *Error indicating "value required". This is used
+// to report required values that are not provided (e.g. empty strings, null
+// values, or empty arrays).
+func Required(field *Path, detail string) *Error {
+ return &Error{ErrorTypeRequired, field.String(), "", detail}
+}
+
+// Duplicate returns a *Error indicating "duplicate value". This is
+// used to report collisions of values that must be unique (e.g. names or IDs).
+func Duplicate(field *Path, value interface{}) *Error {
+ return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+}
+
+// Invalid returns a *Error indicating "invalid value". This is used
+// to report malformed values (e.g. failed regex match, too long, out of bounds).
+func Invalid(field *Path, value interface{}, detail string) *Error {
+ return &Error{ErrorTypeInvalid, field.String(), value, detail}
+}
+
+// NotSupported returns a *Error indicating "unsupported value".
+// This is used to report unknown values for enumerated fields (e.g. a list of
+// valid values).
+func NotSupported(field *Path, value interface{}, validValues []string) *Error {
+ detail := ""
+ if len(validValues) > 0 {
+ detail = "supported values: " + strings.Join(validValues, ", ")
+ }
+ return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+}
+
+// Forbidden returns a *Error indicating "forbidden". This is used to
+// report valid (as per formatting rules) values which would be accepted under
+// some conditions, but which are not permitted by current conditions (e.g.
+// security policy).
+func Forbidden(field *Path, detail string) *Error {
+ return &Error{ErrorTypeForbidden, field.String(), "", detail}
+}
+
+// TooLong returns a *Error indicating "too long". This is used to
+// report that the given value is too long. This is similar to
+// Invalid, but the returned error will not include the too-long
+// value.
+func TooLong(field *Path, value interface{}, maxLength int) *Error {
+ return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)}
+}
+
+// InternalError returns a *Error indicating "internal error". This is used
+// to signal that an error was found that was not directly related to user
+// input. The err argument must be non-nil.
+func InternalError(field *Path, err error) *Error {
+ return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+}
+
+// ErrorList holds a set of Errors. It is plausible that we might one day have
+// non-field errors in this same umbrella package, but for now we don't, so
+// we can keep it simple and leave ErrorList here.
+type ErrorList []*Error
+
+// NewErrorTypeMatcher returns an errors.Matcher that returns true
+// if the provided error is an Error and has the provided ErrorType.
+func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
+ return func(err error) bool {
+ if e, ok := err.(*Error); ok {
+ return e.Type == t
+ }
+ return false
+ }
+}
+
+// ToAggregate converts the ErrorList into an errors.Aggregate.
+func (list ErrorList) ToAggregate() utilerrors.Aggregate {
+ errs := make([]error, len(list))
+ for i := range list {
+ errs[i] = list[i]
+ }
+ return utilerrors.NewAggregate(errs)
+}
+
+func fromAggregate(agg utilerrors.Aggregate) ErrorList {
+ errs := agg.Errors()
+ list := make(ErrorList, len(errs))
+ for i := range errs {
+ list[i] = errs[i].(*Error)
+ }
+ return list
+}
+
+// Filter removes items from the ErrorList that match the provided fns.
+func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
+ err := utilerrors.FilterOut(list.ToAggregate(), fns...)
+ if err == nil {
+ return nil
+ }
+ // FilterOut takes an Aggregate and returns an Aggregate
+ return fromAggregate(err.(utilerrors.Aggregate))
+}
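
A minimal sketch of how a validator typically accumulates these errors; the spec/replicas field names and values are illustrative only.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	root := field.NewPath("spec")
	allErrs := field.ErrorList{}

	// Record a malformed value and a missing required field.
	allErrs = append(allErrs, field.Invalid(root.Child("replicas"), -1, "must be non-negative"))
	allErrs = append(allErrs, field.Required(root.Child("template", "name"), ""))

	for _, e := range allErrs {
		fmt.Println(e.Error())
	}
	// Collapse the list into a single error for callers.
	fmt.Println(allErrs.ToAggregate())
}
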
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go
new file mode 100644
index 0000000..2efc8ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+// Path represents the path from some root to a particular field.
+type Path struct {
+ name string // the name of this field or "" if this is an index
+ index string // if name == "", this is a subscript (index or map key) of the previous element
+ parent *Path // nil if this is the root element
+}
+
+// NewPath creates a root Path object.
+func NewPath(name string, moreNames ...string) *Path {
+ r := &Path{name: name, parent: nil}
+ for _, anotherName := range moreNames {
+ r = &Path{name: anotherName, parent: r}
+ }
+ return r
+}
+
+// Root returns the root element of this Path.
+func (p *Path) Root() *Path {
+ for ; p.parent != nil; p = p.parent {
+ // Do nothing.
+ }
+ return p
+}
+
+// Child creates a new Path that is a child of the method receiver.
+func (p *Path) Child(name string, moreNames ...string) *Path {
+ r := NewPath(name, moreNames...)
+ r.Root().parent = p
+ return r
+}
+
+// Index indicates that the previous Path is to be subscripted by an int.
+// This sets the same underlying value as Key.
+func (p *Path) Index(index int) *Path {
+ return &Path{index: strconv.Itoa(index), parent: p}
+}
+
+// Key indicates that the previous Path is to be subscripted by a string.
+// This sets the same underlying value as Index.
+func (p *Path) Key(key string) *Path {
+ return &Path{index: key, parent: p}
+}
+
+// String produces a string representation of the Path.
+func (p *Path) String() string {
+ // make a slice to iterate
+ elems := []*Path{}
+ for ; p != nil; p = p.parent {
+ elems = append(elems, p)
+ }
+
+ // iterate, but it has to be backwards
+ buf := bytes.NewBuffer(nil)
+ for i := range elems {
+ p := elems[len(elems)-1-i]
+ if p.parent != nil && len(p.name) > 0 {
+ // Not the root and not a subscript, so add a separator dot.
+ buf.WriteString(".")
+ }
+ if len(p.name) > 0 {
+ buf.WriteString(p.name)
+ } else {
+ fmt.Fprintf(buf, "[%s]", p.index)
+ }
+ }
+ return buf.String()
+}
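
A short sketch showing how Path renders into the dotted/bracketed form used by the error messages above; the expected output in the comments follows directly from String.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	p := field.NewPath("spec").Child("containers").Index(0).Child("ports").Index(2).Child("name")
	fmt.Println(p) // spec.containers[0].ports[2].name

	q := field.NewPath("metadata", "labels").Key("app")
	fmt.Println(q) // metadata.labels[app]
}
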
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go
new file mode 100644
index 0000000..6e6a027
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go
@@ -0,0 +1,306 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "math"
+ "net"
+ "regexp"
+ "strings"
+)
+
+const qnameCharFmt string = "[A-Za-z0-9]"
+const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
+const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
+const qualifiedNameMaxLength int = 63
+
+var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name". This is a format used in various places throughout the
+// system. If the value is not valid, a list of error strings is returned.
+// Otherwise an empty list (or nil) is returned.
+func IsQualifiedName(value string) []string {
+ var errs []string
+ parts := strings.Split(value, "/")
+ var name string
+ switch len(parts) {
+ case 1:
+ name = parts[0]
+ case 2:
+ var prefix string
+ prefix, name = parts[0], parts[1]
+ if len(prefix) == 0 {
+ errs = append(errs, "prefix part "+EmptyError())
+ } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
+ errs = append(errs, prefixEach(msgs, "prefix part ")...)
+ }
+ default:
+ return append(errs, RegexError(qualifiedNameFmt, "MyName", "my.name", "123-abc")+
+ " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName'")
+ }
+
+ if len(name) == 0 {
+ errs = append(errs, "name part "+EmptyError())
+ } else if len(name) > qualifiedNameMaxLength {
+ errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength))
+ }
+ if !qualifiedNameRegexp.MatchString(name) {
+ errs = append(errs, "name part "+RegexError(qualifiedNameFmt, "MyName", "my.name", "123-abc"))
+ }
+ return errs
+}
+
+const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
+const LabelValueMaxLength int = 63
+
+var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+
+// IsValidLabelValue tests whether the value passed is a valid label value. If
+// the value is not valid, a list of error strings is returned. Otherwise an
+// empty list (or nil) is returned.
+func IsValidLabelValue(value string) []string {
+ var errs []string
+ if len(value) > LabelValueMaxLength {
+ errs = append(errs, MaxLenError(LabelValueMaxLength))
+ }
+ if !labelValueRegexp.MatchString(value) {
+ errs = append(errs, RegexError(labelValueFmt, "MyValue", "my_value", "12345"))
+ }
+ return errs
+}
+
+const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const DNS1123LabelMaxLength int = 63
+
+var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
+
+// IsDNS1123Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1123).
+func IsDNS1123Label(value string) []string {
+ var errs []string
+ if len(value) > DNS1123LabelMaxLength {
+ errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
+ }
+ if !dns1123LabelRegexp.MatchString(value) {
+ errs = append(errs, RegexError(dns1123LabelFmt, "my-name", "123-abc"))
+ }
+ return errs
+}
+
+const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
+const DNS1123SubdomainMaxLength int = 253
+
+var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+
+// IsDNS1123Subdomain tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123).
+func IsDNS1123Subdomain(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !dns1123SubdomainRegexp.MatchString(value) {
+ errs = append(errs, RegexError(dns1123SubdomainFmt, "example.com"))
+ }
+ return errs
+}
+
+const dns952LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
+const DNS952LabelMaxLength int = 24
+
+var dns952LabelRegexp = regexp.MustCompile("^" + dns952LabelFmt + "$")
+
+// IsDNS952Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 952).
+func IsDNS952Label(value string) []string {
+ var errs []string
+ if len(value) > DNS952LabelMaxLength {
+ errs = append(errs, MaxLenError(DNS952LabelMaxLength))
+ }
+ if !dns952LabelRegexp.MatchString(value) {
+ errs = append(errs, RegexError(dns952LabelFmt, "my-name", "abc-123"))
+ }
+ return errs
+}
+
+const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
+
+var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
+
+// IsCIdentifier tests for a string that conforms to the definition of an identifier
+// in C. This checks the format, but not the length.
+func IsCIdentifier(value string) []string {
+ if !cIdentifierRegexp.MatchString(value) {
+ return []string{RegexError(cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
+ }
+ return nil
+}
+
+// IsValidPortNum tests that the argument is a valid, non-zero port number.
+func IsValidPortNum(port int) []string {
+ if 1 <= port && port <= 65535 {
+ return nil
+ }
+ return []string{InclusiveRangeError(1, 65535)}
+}
+
+// In libcontainer, the UID/GID limits are currently 0 to 1<<31 - 1.
+// TODO: once we have a type for UID/GID we should make these that type.
+const (
+ minUserID = 0
+ maxUserID = math.MaxInt32
+ minGroupID = 0
+ maxGroupID = math.MaxInt32
+)
+
+// IsValidGroupId tests that the argument is a valid Unix GID.
+func IsValidGroupId(gid int64) []string {
+ if minGroupID <= gid && gid <= maxGroupID {
+ return nil
+ }
+ return []string{InclusiveRangeError(minGroupID, maxGroupID)}
+}
+
+// IsValidUserId tests that the argument is a valid Unix UID.
+func IsValidUserId(uid int64) []string {
+ if minUserID <= uid && uid <= maxUserID {
+ return nil
+ }
+ return []string{InclusiveRangeError(minUserID, maxUserID)}
+}
+
+var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$")
+var portNameOneLetterRegexp = regexp.MustCompile("[a-z]")
+
+// IsValidPortName checks that the argument has valid syntax. It must be
+// non-empty and no more than 15 characters long. It may contain only [-a-z0-9]
+// and must contain at least one letter [a-z]. It must not start or end with a
+// hyphen, nor contain adjacent hyphens.
+//
+// Note: We only allow lower-case characters, even though RFC 6335 is case
+// insensitive.
+func IsValidPortName(port string) []string {
+ var errs []string
+ if len(port) > 15 {
+ errs = append(errs, MaxLenError(15))
+ }
+ if !portNameCharsetRegex.MatchString(port) {
+ errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)")
+ }
+ if !portNameOneLetterRegexp.MatchString(port) {
+ errs = append(errs, "must contain at least one letter (a-z)")
+ }
+ if strings.Contains(port, "--") {
+ errs = append(errs, "must not contain consecutive hyphens")
+ }
+ if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') {
+ errs = append(errs, "must not begin or end with a hyphen")
+ }
+ return errs
+}
+
+// IsValidIP tests that the argument is a valid IP address.
+func IsValidIP(value string) []string {
+ if net.ParseIP(value) == nil {
+ return []string{"must be a valid IP address, (e.g. 10.9.8.7)"}
+ }
+ return nil
+}
+
+const percentFmt string = "[0-9]+%"
+
+var percentRegexp = regexp.MustCompile("^" + percentFmt + "$")
+
+func IsValidPercent(percent string) []string {
+ if !percentRegexp.MatchString(percent) {
+ return []string{RegexError(percentFmt, "1%", "93%")}
+ }
+ return nil
+}
+
+const httpHeaderNameFmt string = "[-A-Za-z0-9]+"
+
+var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$")
+
+// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's
+// definition of a valid header field name (a stricter subset than RFC7230).
+func IsHTTPHeaderName(value string) []string {
+ if !httpHeaderNameRegexp.MatchString(value) {
+ return []string{RegexError(httpHeaderNameFmt, "X-Header-Name")}
+ }
+ return nil
+}
+
+const configMapKeyFmt = "\\.?" + dns1123SubdomainFmt
+
+var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$")
+
+// IsConfigMapKey tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123), except that a leading dot is allowed
+func IsConfigMapKey(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !configMapKeyRegexp.MatchString(value) {
+ errs = append(errs, RegexError(configMapKeyFmt, "key.name"))
+ }
+ return errs
+}
+
+// MaxLenError returns a string explanation of a "string too long" validation
+// failure.
+func MaxLenError(length int) string {
+ return fmt.Sprintf("must be no more than %d characters", length)
+}
+
+// RegexError returns a string explanation of a regex validation failure.
+func RegexError(fmt string, examples ...string) string {
+ s := "must match the regex " + fmt
+ if len(examples) == 0 {
+ return s
+ }
+ s += " (e.g. "
+ for i := range examples {
+ if i > 0 {
+ s += " or "
+ }
+ s += "'" + examples[i] + "'"
+ }
+ return s + ")"
+}
+
+// EmptyError returns a string explanation of a "must not be empty" validation
+// failure.
+func EmptyError() string {
+ return "must be non-empty"
+}
+
+func prefixEach(msgs []string, prefix string) []string {
+ for i := range msgs {
+ msgs[i] = prefix + msgs[i]
+ }
+ return msgs
+}
+
+// InclusiveRangeError returns a string explanation of a numeric "must be
+// between" validation failure.
+func InclusiveRangeError(lo, hi int) string {
+ return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi)
+}
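
A minimal sketch of the validators above; each returns a nil or empty slice on success and a list of human-readable messages on failure. The sample values are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/validation"
)

func main() {
	fmt.Println(validation.IsDNS1123Label("kube2msb"))            // [] (valid)
	fmt.Println(validation.IsDNS1123Label("Not_Valid"))           // regex error message
	fmt.Println(validation.IsValidPortNum(8080))                  // [] (valid)
	fmt.Println(validation.IsValidPortName("http-metrics"))       // [] (valid)
	fmt.Println(validation.IsQualifiedName("example.com/MyName")) // [] (valid)
}
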
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go
new file mode 100644
index 0000000..ff89dc1
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait provides tools for polling or listening for changes
+// to a condition.
+package wait
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go
new file mode 100644
index 0000000..bd4543e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go
@@ -0,0 +1,268 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+ "errors"
+ "math/rand"
+ "time"
+
+ "k8s.io/kubernetes/pkg/util/runtime"
+)
+
+// For any test of the style:
+// ...
+// <- time.After(timeout):
+// t.Errorf("Timed out")
+// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
+// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
+// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
+var ForeverTestTimeout = time.Second * 30
+
+// NeverStop may be passed to Until to make it never stop.
+var NeverStop <-chan struct{} = make(chan struct{})
+
+// Forever is syntactic sugar on top of Until
+func Forever(f func(), period time.Duration) {
+ Until(f, period, NeverStop)
+}
+
+// Until loops until stop channel is closed, running f every period.
+// Until is syntactic sugar on top of JitterUntil with zero jitter
+// factor, with sliding = true (which means the timer for period
+// starts after the f completes).
+func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
+ JitterUntil(f, period, 0.0, true, stopCh)
+}
+
+// NonSlidingUntil loops until stop channel is closed, running f every
+// period. NonSlidingUntil is syntactic sugar on top of JitterUntil
+// with zero jitter factor, with sliding = false (meaning the timer for
+// period starts at the same time as the function starts).
+func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
+ JitterUntil(f, period, 0.0, false, stopCh)
+}
+
+// JitterUntil loops until stop channel is closed, running f every period.
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged.
+// Catches any panics, and keeps going. f may not be invoked if
+// the stop channel is already closed. Pass NeverStop to Until if you
+// don't want it to stop.
+func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+
+ for {
+ jitteredPeriod := period
+ if jitterFactor > 0.0 {
+ jitteredPeriod = Jitter(period, jitterFactor)
+ }
+
+ var t *time.Timer
+ if !sliding {
+ t = time.NewTimer(jitteredPeriod)
+ }
+
+ func() {
+ defer runtime.HandleCrash()
+ f()
+ }()
+
+ if sliding {
+ t = time.NewTimer(jitteredPeriod)
+ } else {
+ // The timer we created could already have fired, so be
+ // careful and check stopCh first.
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ }
+
+ select {
+ case <-stopCh:
+ return
+ case <-t.C:
+ }
+ }
+}
+
+// Jitter returns a time.Duration between duration and duration + maxFactor * duration,
+// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
+// suggested default value will be chosen.
+func Jitter(duration time.Duration, maxFactor float64) time.Duration {
+ if maxFactor <= 0.0 {
+ maxFactor = 1.0
+ }
+ wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+ return wait
+}
+
+// ErrWaitTimeout is returned when the condition exited without success
+var ErrWaitTimeout = errors.New("timed out waiting for the condition")
+
+// ConditionFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+type ConditionFunc func() (done bool, err error)
+
+// Backoff is parameters applied to a Backoff function.
+type Backoff struct {
+ Duration time.Duration
+ Factor float64
+ Jitter float64
+ Steps int
+}
+
+// ExponentialBackoff repeats a condition check up to steps times, increasing the wait
+// by multiplying the previous duration by factor. If jitter is greater than zero,
+// a random amount of each duration is added (between duration and duration*(1+jitter)).
+// If the condition never returns true, ErrWaitTimeout is returned. All other errors
+// terminate immediately.
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
+ duration := backoff.Duration
+ for i := 0; i < backoff.Steps; i++ {
+ if i != 0 {
+ adjusted := duration
+ if backoff.Jitter > 0.0 {
+ adjusted = Jitter(duration, backoff.Jitter)
+ }
+ time.Sleep(adjusted)
+ duration = time.Duration(float64(duration) * backoff.Factor)
+ }
+ if ok, err := condition(); err != nil || ok {
+ return err
+ }
+ }
+ return ErrWaitTimeout
+}
+
+// Poll tries a condition func until it returns true, an error, or the timeout
+// is reached. condition will always be invoked at least once but some intervals
+// may be missed if the condition takes too long or the time window is too short.
+// If you want to Poll something forever, see PollInfinite.
+// Poll always waits the interval before the first check of the condition.
+func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
+ return pollInternal(poller(interval, timeout), condition)
+}
+
+func pollInternal(wait WaitFunc, condition ConditionFunc) error {
+ done := make(chan struct{})
+ defer close(done)
+ return WaitFor(wait, condition, done)
+}
+
+// PollImmediate is identical to Poll, except that it performs the first check
+// immediately, not waiting interval beforehand.
+func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
+ return pollImmediateInternal(poller(interval, timeout), condition)
+}
+
+func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
+ done, err := condition()
+ if err != nil {
+ return err
+ }
+ if done {
+ return nil
+ }
+ return pollInternal(wait, condition)
+}
+
+// PollInfinite polls forever.
+func PollInfinite(interval time.Duration, condition ConditionFunc) error {
+ done := make(chan struct{})
+ defer close(done)
+ return WaitFor(poller(interval, 0), condition, done)
+}
+
+// WaitFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+type WaitFunc func(done <-chan struct{}) <-chan struct{}
+
+// WaitFor gets a channel from wait(), and then invokes fn once for every value
+// placed on the channel and once more when the channel is closed. If fn
+// returns an error the loop ends and that error is returned, and if fn returns
+// true the loop ends and nil is returned. ErrWaitTimeout will be returned if
+// the channel is closed without fn ever returning true.
+func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
+ c := wait(done)
+ for {
+ _, open := <-c
+ ok, err := fn()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ if !open {
+ break
+ }
+ }
+ return ErrWaitTimeout
+}
+
+// poller returns a WaitFunc that will send to the channel every
+// interval until timeout has elapsed and then close the channel.
+// Over very short intervals you may receive no ticks before
+// the channel is closed. If timeout is 0, the channel
+// will never be closed.
+func poller(interval, timeout time.Duration) WaitFunc {
+ return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
+ ch := make(chan struct{})
+
+ go func() {
+ defer close(ch)
+
+ tick := time.NewTicker(interval)
+ defer tick.Stop()
+
+ var after <-chan time.Time
+ if timeout != 0 {
+ // time.After is more convenient, but it
+ // potentially leaves timers around much longer
+ // than necessary if we exit early.
+ timer := time.NewTimer(timeout)
+ after = timer.C
+ defer timer.Stop()
+ }
+
+ for {
+ select {
+ case <-tick.C:
+ // If the consumer isn't ready for this signal drop it and
+ // check the other channels.
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+ case <-after:
+ return
+ case <-done:
+ return
+ }
+ }
+ }()
+
+ return ch
+ })
+}
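
A minimal sketch of the polling helpers, with a condition that succeeds after a few attempts; the intervals chosen here are arbitrary.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll waits one interval, then checks the condition until it returns
	// true, returns an error, or the timeout expires.
	err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	fmt.Println("poll:", attempts, err)

	// ExponentialBackoff retries with a growing, optionally jittered delay.
	err = wait.ExponentialBackoff(wait.Backoff{
		Duration: 50 * time.Millisecond,
		Factor:   2.0,
		Jitter:   0.1,
		Steps:    4,
	}, func() (bool, error) {
		return true, nil
	})
	fmt.Println("backoff:", err)
}
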
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go
new file mode 100644
index 0000000..6a9f05a
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "unicode"
+
+ "github.com/ghodss/yaml"
+ "github.com/golang/glog"
+)
+
+// ToJSON converts a single YAML document into a JSON document
+// or returns an error. If the document appears to be JSON the
+// YAML decoding path is not used (so that error messages are
+// JSON specific).
+func ToJSON(data []byte) ([]byte, error) {
+ if hasJSONPrefix(data) {
+ return data, nil
+ }
+ return yaml.YAMLToJSON(data)
+}
+
+// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
+// separating individual documents. It first converts the YAML
+// body to JSON, then unmarshals the JSON.
+type YAMLToJSONDecoder struct {
+ scanner *bufio.Scanner
+}
+
+// NewYAMLToJSONDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk, converting it to JSON via
+// yaml.YAMLToJSON, and then unmarshaling the resulting JSON with encoding/json.
+func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitYAMLDocument)
+ return &YAMLToJSONDecoder{
+ scanner: scanner,
+ }
+}
+
+// Decode reads a YAML document as JSON from the stream or returns
+// an error. The decoding rules match json.Unmarshal, not
+// yaml.Unmarshal.
+func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
+ if d.scanner.Scan() {
+ data, err := yaml.YAMLToJSON(d.scanner.Bytes())
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(data, into)
+ }
+ err := d.scanner.Err()
+ if err == nil {
+ err = io.EOF
+ }
+ return err
+}
+
+// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
+// the data is not sufficient.
+type YAMLDecoder struct {
+ r io.ReadCloser
+ scanner *bufio.Scanner
+ remaining []byte
+}
+
+// NewDocumentDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk. io.ErrShortBuffer will be
+// returned if the entire buffer could not be read to assist
+// the caller in framing the chunk.
+func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitYAMLDocument)
+ return &YAMLDecoder{
+ r: r,
+ scanner: scanner,
+ }
+}
+
+// Read reads the previous slice into the buffer, or attempts to read
+// the next chunk.
+// TODO: switch to readline approach.
+func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
+ left := len(d.remaining)
+ if left == 0 {
+ // return the next chunk from the stream
+ if !d.scanner.Scan() {
+ err := d.scanner.Err()
+ if err == nil {
+ err = io.EOF
+ }
+ return 0, err
+ }
+ out := d.scanner.Bytes()
+ d.remaining = out
+ left = len(out)
+ }
+
+ // fits within data
+ if left <= len(data) {
+ copy(data, d.remaining)
+ d.remaining = nil
+ // left still holds the number of bytes just copied.
+ return left, nil
+ }
+
+ // caller will need to reread; return what fits and keep the rest
+ n = copy(data, d.remaining)
+ d.remaining = d.remaining[n:]
+ return n, io.ErrShortBuffer
+}
+
+func (d *YAMLDecoder) Close() error {
+ return d.r.Close()
+}
+
+const yamlSeparator = "\n---"
+
+// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ sep := len([]byte(yamlSeparator))
+ if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
+ // We have a potential document terminator
+ i += sep
+ after := data[i:]
+ if len(after) == 0 {
+ // we can't read any more characters
+ if atEOF {
+ return len(data), data[:len(data)-sep], nil
+ }
+ return 0, nil, nil
+ }
+ if j := bytes.IndexByte(after, '\n'); j >= 0 {
+ return i + j + 1, data[0 : i-sep], nil
+ }
+ return 0, nil, nil
+ }
+ // If we're at EOF, we have a final, non-terminated line. Return it.
+ if atEOF {
+ return len(data), data, nil
+ }
+ // Request more data.
+ return 0, nil, nil
+}
+
+// decoder is a convenience interface for Decode.
+type decoder interface {
+ Decode(into interface{}) error
+}
+
+// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
+// YAML documents by sniffing for a leading { character.
+type YAMLOrJSONDecoder struct {
+ r io.Reader
+ bufferSize int
+
+ decoder decoder
+}
+
+// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
+// or JSON documents from the given reader as a stream. bufferSize determines
+// how far into the stream the decoder will look to figure out whether this
+// is a JSON stream (has whitespace followed by an open brace).
+func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
+ return &YAMLOrJSONDecoder{
+ r: r,
+ bufferSize: bufferSize,
+ }
+}
+
+// Decode unmarshals the next object from the underlying stream into the
+// provided object, or returns an error.
+func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
+ if d.decoder == nil {
+ buffer, isJSON := GuessJSONStream(d.r, d.bufferSize)
+ if isJSON {
+ glog.V(4).Infof("decoding stream as JSON")
+ d.decoder = json.NewDecoder(buffer)
+ } else {
+ glog.V(4).Infof("decoding stream as YAML")
+ d.decoder = NewYAMLToJSONDecoder(buffer)
+ }
+ }
+ err := d.decoder.Decode(into)
+ if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
+ if syntax, ok := err.(*json.SyntaxError); ok {
+ data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
+ if readErr != nil {
+ glog.V(4).Infof("reading stream failed: %v", readErr)
+ }
+ js := string(data)
+ start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
+ line := strings.Count(js[:start], "\n")
+ return fmt.Errorf("json: line %d: %s", line, syntax.Error())
+ }
+ }
+ return err
+}
+
+// GuessJSONStream scans the provided reader up to size, looking
+// for an open brace indicating this is JSON. It will return the
+// bufio.Reader it creates for the consumer.
+func GuessJSONStream(r io.Reader, size int) (io.Reader, bool) {
+ buffer := bufio.NewReaderSize(r, size)
+ b, _ := buffer.Peek(size)
+ return buffer, hasJSONPrefix(b)
+}
+
+var jsonPrefix = []byte("{")
+
+// hasJSONPrefix returns true if the provided buffer appears to start with
+// a JSON open brace.
+func hasJSONPrefix(buf []byte) bool {
+ return hasPrefix(buf, jsonPrefix)
+}
+
+// hasPrefix returns true if the first non-whitespace bytes in buf
+// start with prefix.
+func hasPrefix(buf []byte, prefix []byte) bool {
+ trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
+ return bytes.HasPrefix(trim, prefix)
+}
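
A small sketch of YAMLOrJSONDecoder on an in-memory stream; the document content is illustrative.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/kubernetes/pkg/util/yaml"
)

func main() {
	doc := []byte("name: kube2msb\nport: 8080\n")

	var out map[string]interface{}
	// The decoder sniffs the first bytes: '{' means JSON, otherwise the
	// stream is converted from YAML to JSON before unmarshaling.
	d := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(doc), 4096)
	if err := d.Decode(&out); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(out["name"], out["port"]) // kube2msb 8080
}
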
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/base.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/base.go
new file mode 100644
index 0000000..475bd06
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/base.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are modified on the fly by the build process. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+//
+// When releasing a new Kubernetes version, this file is updated by
+// build/mark_new_version.sh to reflect the new version, and then a
+// git annotated tag (using format vX.Y where X == Major version and Y
+// == Minor version) is created to point to the commit that updates
+// pkg/version/base.go
+var (
+ // TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+ // instead. First step in deprecation, keep the fields but make
+ // them irrelevant. (Next we'll take it out, which may muck with
+ // scripts consuming the kubectl version output - but most of
+ // these should be looking at gitVersion already anyways.)
+ gitMajor string = "" // major version, always numeric
+ gitMinor string = "" // minor version, numeric possibly followed by "+"
+
+ // semantic version, derived by build scripts (see
+ // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md
+ // for a detailed discussion of this field)
+ //
+ // TODO: This field is still called "gitVersion" for legacy
+ // reasons. For prerelease versions, the build metadata on the
+ // semantic version is a git hash, but the version itself is no
+ // longer the direct output of "git describe", but a slight
+ // translation to be semver compliant.
+ gitVersion string = "v0.0.0-master+$Format:%h$"
+ gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+ gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"
+
+ buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/doc.go
new file mode 100644
index 0000000..ccedec7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version supplies version information collected at build time to
+// kubernetes components.
+package version
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/semver.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/semver.go
new file mode 100644
index 0000000..1f4067e
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/semver.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "strings"
+ "unicode"
+
+ "github.com/blang/semver"
+ "github.com/golang/glog"
+)
+
+func Parse(gitversion string) (semver.Version, error) {
+ // optionally trim leading spaces then one v
+ var seen bool
+ gitversion = strings.TrimLeftFunc(gitversion, func(ch rune) bool {
+ if seen {
+ return false
+ }
+ if ch == 'v' {
+ seen = true
+ return true
+ }
+ return unicode.IsSpace(ch)
+ })
+
+ return semver.Make(gitversion)
+}
+
+func MustParse(gitversion string) semver.Version {
+ v, err := Parse(gitversion)
+ if err != nil {
+ glog.Fatalf("failed to parse semver from gitversion %q: %v", gitversion, err)
+ }
+ return v
+}
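
A quick sketch of Parse, which strips an optional leading "v" before delegating to blang/semver; the version string is illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/version"
)

func main() {
	v, err := version.Parse("v1.2.3-beta.1+abc123")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(v.Major, v.Minor, v.Patch) // 1 2 3
}
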
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/version.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/version.go
new file mode 100644
index 0000000..1e93132
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/version/version.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Info contains versioning information.
+// TODO: Add []string of api versions supported? It's still unclear
+// how we'll want to distribute that information.
+type Info struct {
+ Major string `json:"major"`
+ Minor string `json:"minor"`
+ GitVersion string `json:"gitVersion"`
+ GitCommit string `json:"gitCommit"`
+ GitTreeState string `json:"gitTreeState"`
+ BuildDate string `json:"buildDate"`
+ GoVersion string `json:"goVersion"`
+ Compiler string `json:"compiler"`
+ Platform string `json:"platform"`
+}
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+func Get() Info {
+ // These variables typically come from -ldflags settings and in
+ // their absence fallback to the settings in pkg/version/base.go
+ return Info{
+ Major: gitMajor,
+ Minor: gitMinor,
+ GitVersion: gitVersion,
+ GitCommit: gitCommit,
+ GitTreeState: gitTreeState,
+ BuildDate: buildDate,
+ GoVersion: runtime.Version(),
+ Compiler: runtime.Compiler,
+ Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+ }
+}
+
+// String returns info as a human-friendly version string.
+func (info Info) String() string {
+ return info.GitVersion
+}
+
+func init() {
+ buildInfo := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: "kubernetes_build_info",
+ Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.",
+ },
+ []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"},
+ )
+ info := Get()
+ buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1)
+
+ prometheus.MustRegister(buildInfo)
+}
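
A small sketch of Get; without -ldflags overrides it reports the fallback values from base.go plus the local Go runtime details.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/version"
)

func main() {
	info := version.Get()
	// Prints the fallback gitVersion for an ad-hoc build, followed by the
	// local Go version and GOOS/GOARCH.
	fmt.Println(info.String(), info.GoVersion, info.Platform)
}
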
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/doc.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/doc.go
new file mode 100644
index 0000000..5fde5e7
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package watch contains a generic watchable interface, and a fake for
+// testing code that uses the watch interface.
+package watch
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/filter.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/filter.go
new file mode 100644
index 0000000..3ca27f2
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/filter.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+ "sync"
+)
+
+// FilterFunc should take an event, possibly modify it in some way, and return
+// the modified event. If the event should be ignored, then return keep=false.
+type FilterFunc func(in Event) (out Event, keep bool)
+
+// Filter passes all events through f before allowing them to pass on.
+// Putting a filter on a watch, as an unavoidable side-effect due to the way
+// go channels work, effectively causes the watch's event channel to have its
+// queue length increased by one.
+//
+// WARNING: filter has a fatal flaw, in that it can't properly update the
+// Type field (Add/Modified/Deleted) to reflect items beginning to pass the
+// filter when they previously didn't.
+//
+func Filter(w Interface, f FilterFunc) Interface {
+ fw := &filteredWatch{
+ incoming: w,
+ result: make(chan Event),
+ f: f,
+ }
+ go fw.loop()
+ return fw
+}
+
+type filteredWatch struct {
+ incoming Interface
+ result chan Event
+ f FilterFunc
+}
+
+// ResultChan returns a channel which will receive filtered events.
+func (fw *filteredWatch) ResultChan() <-chan Event {
+ return fw.result
+}
+
+// Stop stops the upstream watch, which will eventually stop this watch.
+func (fw *filteredWatch) Stop() {
+ fw.incoming.Stop()
+}
+
+// loop waits for new values, filters them, and resends them.
+func (fw *filteredWatch) loop() {
+ defer close(fw.result)
+ for {
+ event, ok := <-fw.incoming.ResultChan()
+ if !ok {
+ break
+ }
+ filtered, keep := fw.f(event)
+ if keep {
+ fw.result <- filtered
+ }
+ }
+}
+
+// Recorder records all events that are sent from the watch until it is closed.
+type Recorder struct {
+ Interface
+
+ lock sync.Mutex
+ events []Event
+}
+
+var _ Interface = &Recorder{}
+
+// NewRecorder wraps an Interface and records any changes sent across it.
+func NewRecorder(w Interface) *Recorder {
+ r := &Recorder{}
+ r.Interface = Filter(w, r.record)
+ return r
+}
+
+// record is a FilterFunc and tracks each received event.
+func (r *Recorder) record(in Event) (Event, bool) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ r.events = append(r.events, in)
+ return in, true
+}
+
+// Events returns a copy of the events sent across this recorder.
+func (r *Recorder) Events() []Event {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ copied := make([]Event, len(r.events))
+ copy(copied, r.events)
+ return copied
+}
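
A minimal sketch combining Filter with the Broadcaster defined in mux.go below. It assumes the Event type and EventType constants (Added, Modified, Deleted) from the rest of this vendored watch package, and the note type is a throwaway runtime.Object used only for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/watch"
)

// note is a trivial object satisfying runtime.Object in this vendored tree
// (only GetObjectKind is required here).
type note string

func (note) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind }

func main() {
	b := watch.NewBroadcaster(10, watch.WaitIfChannelFull)

	// Drop Deleted events; pass everything else through unchanged.
	w := watch.Filter(b.Watch(), func(e watch.Event) (watch.Event, bool) {
		return e, e.Type != watch.Deleted
	})

	b.Action(watch.Added, note("a"))
	b.Action(watch.Deleted, note("b"))
	b.Action(watch.Modified, note("c"))
	b.Shutdown()

	for e := range w.ResultChan() {
		fmt.Println(e.Type, e.Object) // ADDED a, then MODIFIED c
	}
}
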
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/mux.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/mux.go
new file mode 100644
index 0000000..ec6de05
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/mux.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+ "sync"
+
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
+// channel is full.
+type FullChannelBehavior int
+
+const (
+ WaitIfChannelFull FullChannelBehavior = iota
+ DropIfChannelFull
+)
+
+// Buffer the incoming queue a little bit even though it should rarely ever accumulate
+// anything, just in case a few events are received in such a short window that
+// Broadcaster can't move them onto the watchers' queues fast enough.
+const incomingQueueLength = 25
+
+// Broadcaster distributes event notifications among any number of watchers. Every event
+// is delivered to every watcher.
+type Broadcaster struct {
+ // TODO: see if this lock is needed now that new watchers go through
+ // the incoming channel.
+ lock sync.Mutex
+
+ watchers map[int64]*broadcasterWatcher
+ nextWatcher int64
+ distributing sync.WaitGroup
+
+ incoming chan Event
+
+ // How large to make watcher's channel.
+ watchQueueLength int
+ // If one of the watch channels is full, don't wait for it to become empty.
+ // Instead just deliver it to the watchers that do have space in their
+ // channels and move on to the next event.
+ // It's more fair to do this on a per-watcher basis than to do it on the
+ // "incoming" channel, which would allow one slow watcher to prevent all
+ // other watchers from getting new events.
+ fullChannelBehavior FullChannelBehavior
+}
+
+// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
+// It is guaranteed that events will be distributed in the order in which they occur,
+// but the order in which a single event is distributed among all of the watchers is unspecified.
+func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
+ m := &Broadcaster{
+ watchers: map[int64]*broadcasterWatcher{},
+ incoming: make(chan Event, incomingQueueLength),
+ watchQueueLength: queueLength,
+ fullChannelBehavior: fullChannelBehavior,
+ }
+ m.distributing.Add(1)
+ go m.loop()
+ return m
+}
+
+const internalRunFunctionMarker = "internal-do-function"
+
+// a function type we can shoehorn into the queue.
+type functionFakeRuntimeObject func()
+
+func (obj functionFakeRuntimeObject) GetObjectKind() unversioned.ObjectKind {
+ return unversioned.EmptyObjectKind
+}
+
+// Execute f, blocking the incoming queue (and waiting for it to drain first).
+// The purpose of this terrible hack is so that watchers added after an event
+// won't ever see that event, and will always see any event after they are
+// added.
+func (b *Broadcaster) blockQueue(f func()) {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ b.incoming <- Event{
+ Type: internalRunFunctionMarker,
+ Object: functionFakeRuntimeObject(func() {
+ defer wg.Done()
+ f()
+ }),
+ }
+ wg.Wait()
+}
+
+// Watch adds a new watcher to the list and returns an Interface for it.
+// Note: new watchers will only receive new events. They won't get an entire history
+// of previous events.
+func (m *Broadcaster) Watch() Interface {
+ var w *broadcasterWatcher
+ m.blockQueue(func() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ id := m.nextWatcher
+ m.nextWatcher++
+ w = &broadcasterWatcher{
+ result: make(chan Event, m.watchQueueLength),
+ stopped: make(chan struct{}),
+ id: id,
+ m: m,
+ }
+ m.watchers[id] = w
+ })
+ return w
+}
+
+// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends
+// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster.
+// The returned watch will have a queue length that is at least large enough to accommodate
+// all of the items in queuedEvents.
+func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface {
+ var w *broadcasterWatcher
+ m.blockQueue(func() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ id := m.nextWatcher
+ m.nextWatcher++
+ length := m.watchQueueLength
+ if n := len(queuedEvents) + 1; n > length {
+ length = n
+ }
+ w = &broadcasterWatcher{
+ result: make(chan Event, length),
+ stopped: make(chan struct{}),
+ id: id,
+ m: m,
+ }
+ m.watchers[id] = w
+ for _, e := range queuedEvents {
+ w.result <- e
+ }
+ })
+ return w
+}
+
+// stopWatching stops the given watcher and removes it from the list.
+func (m *Broadcaster) stopWatching(id int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ w, ok := m.watchers[id]
+ if !ok {
+ // No need to do anything, it's already been removed from the list.
+ return
+ }
+ delete(m.watchers, id)
+ close(w.result)
+}
+
+// closeAll disconnects all watchers (presumably in response to a Shutdown call).
+func (m *Broadcaster) closeAll() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ for _, w := range m.watchers {
+ close(w.result)
+ }
+ // Delete everything from the map, since presence/absence in the map is used
+ // by stopWatching to avoid double-closing the channel.
+ m.watchers = map[int64]*broadcasterWatcher{}
+}
+
+// Action distributes the given event among all watchers.
+func (m *Broadcaster) Action(action EventType, obj runtime.Object) {
+ m.incoming <- Event{action, obj}
+}
+
+// Shutdown disconnects all watchers (but any queued events will still be distributed).
+// You must not call Action or Watch* after calling Shutdown. This call blocks
+// until all events have been pushed onto the outbound channels. Note that
+// because those channels can be buffered, a watcher might not have consumed
+// an event yet; it can still be sitting in the buffered channel.
+func (m *Broadcaster) Shutdown() {
+ close(m.incoming)
+ m.distributing.Wait()
+}
+
+// loop receives from m.incoming and distributes to all watchers.
+func (m *Broadcaster) loop() {
+ // Deliberately not catching crashes here. Yes, bring down the process if there's a
+ // bug in watch.Broadcaster.
+ for {
+ event, ok := <-m.incoming
+ if !ok {
+ break
+ }
+ if event.Type == internalRunFunctionMarker {
+ event.Object.(functionFakeRuntimeObject)()
+ continue
+ }
+ m.distribute(event)
+ }
+ m.closeAll()
+ m.distributing.Done()
+}
+
+// distribute sends event to all watchers. Blocking.
+func (m *Broadcaster) distribute(event Event) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ if m.fullChannelBehavior == DropIfChannelFull {
+ for _, w := range m.watchers {
+ select {
+ case w.result <- event:
+ case <-w.stopped:
+ default: // Don't block if the event can't be queued.
+ }
+ }
+ } else {
+ for _, w := range m.watchers {
+ select {
+ case w.result <- event:
+ case <-w.stopped:
+ }
+ }
+ }
+}
+
+// broadcasterWatcher handles a single watcher of a broadcaster
+type broadcasterWatcher struct {
+ result chan Event
+ stopped chan struct{}
+ stop sync.Once
+ id int64
+ m *Broadcaster
+}
+
+// ResultChan returns a channel to use for waiting on events.
+func (mw *broadcasterWatcher) ResultChan() <-chan Event {
+ return mw.result
+}
+
+// Stop stops watching and removes mw from its list.
+func (mw *broadcasterWatcher) Stop() {
+ mw.stop.Do(func() {
+ close(mw.stopped)
+ mw.m.stopWatching(mw.id)
+ })
+}
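For reference, a minimal sketch of how the Broadcaster above is consumed, assuming the vendored import paths in this tree; runtime.Unknown is used only as a convenient stand-in for a real runtime.Object:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

func main() {
	// Queue up to 10 events per watcher; block the sender if a watcher falls behind.
	b := watch.NewBroadcaster(10, watch.WaitIfChannelFull)
	defer b.Shutdown()

	w := b.Watch()
	defer w.Stop()

	// Distribute one event to every registered watcher.
	b.Action(watch.Added, &runtime.Unknown{Raw: []byte(`{"kind":"Example"}`)})

	ev := <-w.ResultChan()
	fmt.Println(ev.Type) // ADDED
}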
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go
new file mode 100644
index 0000000..26cf61d
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+ "io"
+ "sync"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/util/net"
+ utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+)
+
+// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written.
+type Decoder interface {
+ // Decode should return the type of event and the decoded object, or an error.
+ // An error will cause StreamWatcher to call Close(). Decode should block until
+ // it has data or an error occurs.
+ Decode() (action EventType, object runtime.Object, err error)
+
+ // Close should close the underlying io.Reader, signalling to the source of
+ // the stream that it is no longer being watched. Close() must cause any
+ // outstanding call to Decode() to return with an error of some sort.
+ Close()
+}
+
+// StreamWatcher turns any stream for which you can write a Decoder interface
+// into a watch.Interface.
+type StreamWatcher struct {
+ sync.Mutex
+ source Decoder
+ result chan Event
+ stopped bool
+}
+
+// NewStreamWatcher creates a StreamWatcher from the given decoder.
+func NewStreamWatcher(d Decoder) *StreamWatcher {
+ sw := &StreamWatcher{
+ source: d,
+ // It's easy for a consumer to add buffering via an extra
+ // goroutine/channel, but impossible for them to remove it,
+ // so nonbuffered is better.
+ result: make(chan Event),
+ }
+ go sw.receive()
+ return sw
+}
+
+// ResultChan implements Interface.
+func (sw *StreamWatcher) ResultChan() <-chan Event {
+ return sw.result
+}
+
+// Stop implements Interface.
+func (sw *StreamWatcher) Stop() {
+ // Call Close() exactly once by locking and setting a flag.
+ sw.Lock()
+ defer sw.Unlock()
+ if !sw.stopped {
+ sw.stopped = true
+ sw.source.Close()
+ }
+}
+
+// stopping returns true if Stop() was called previously.
+func (sw *StreamWatcher) stopping() bool {
+ sw.Lock()
+ defer sw.Unlock()
+ return sw.stopped
+}
+
+// receive reads result from the decoder in a loop and sends down the result channel.
+func (sw *StreamWatcher) receive() {
+ defer close(sw.result)
+ defer sw.Stop()
+ defer utilruntime.HandleCrash()
+ for {
+ action, obj, err := sw.source.Decode()
+ if err != nil {
+ // Ignore expected error.
+ if sw.stopping() {
+ return
+ }
+ switch err {
+ case io.EOF:
+ // watch closed normally
+ case io.ErrUnexpectedEOF:
+ glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
+ default:
+ msg := "Unable to decode an event from the watch stream: %v"
+ if net.IsProbableEOF(err) {
+ glog.V(5).Infof(msg, err)
+ } else {
+ glog.Errorf(msg, err)
+ }
+ }
+ return
+ }
+ sw.result <- Event{
+ Type: action,
+ Object: obj,
+ }
+ }
+}
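A sketch of a toy Decoder implementation driving NewStreamWatcher, under the interface contract documented above. chanDecoder is illustrative only; a production Decoder must also unblock a pending Decode when Close is called.

package main

import (
	"fmt"
	"io"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// chanDecoder serves pre-canned events from a channel and reports io.EOF once
// the channel is closed, which StreamWatcher treats as a normal close.
type chanDecoder struct {
	events chan watch.Event
}

func (d *chanDecoder) Decode() (watch.EventType, runtime.Object, error) {
	ev, ok := <-d.events
	if !ok {
		return "", nil, io.EOF
	}
	return ev.Type, ev.Object, nil
}

// Close has nothing to release in this sketch; a real Decoder would close its
// underlying reader here so that Decode returns.
func (d *chanDecoder) Close() {}

func main() {
	events := make(chan watch.Event, 1)
	events <- watch.Event{Type: watch.Added, Object: &runtime.Unknown{Raw: []byte(`{}`)}}
	close(events)

	sw := watch.NewStreamWatcher(&chanDecoder{events: events})
	for ev := range sw.ResultChan() {
		fmt.Println(ev.Type) // ADDED
	}
}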
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/until.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/until.go
new file mode 100644
index 0000000..4259f51
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/until.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+ "time"
+
+ "k8s.io/kubernetes/pkg/util/wait"
+)
+
+// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
+// or an error if the condition cannot be checked and should terminate. In general, it is better to define
+// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
+// from false to true).
+type ConditionFunc func(event Event) (bool, error)
+
+// Until reads items from the watch until each provided condition succeeds, and then returns the last event
+// encountered. The first condition that returns an error terminates the watch (and that event is also returned).
+// If no event has been received, the returned event will be nil.
+// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
+func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc) (*Event, error) {
+ ch := watcher.ResultChan()
+ defer watcher.Stop()
+ var after <-chan time.Time
+ if timeout > 0 {
+ after = time.After(timeout)
+ } else {
+ ch := make(chan time.Time)
+ close(ch)
+ after = ch
+ }
+ var lastEvent *Event
+ for _, condition := range conditions {
+ // check the next condition against the previous event and short circuit waiting for the next watch
+ if lastEvent != nil {
+ done, err := condition(*lastEvent)
+ if err != nil {
+ return lastEvent, err
+ }
+ if done {
+ break
+ }
+ }
+ ConditionSucceeded:
+ for {
+ select {
+ case event, ok := <-ch:
+ if !ok {
+ return lastEvent, wait.ErrWaitTimeout
+ }
+ lastEvent = &event
+
+ // TODO: check for watch expired error and retry watch from latest point?
+ done, err := condition(event)
+ if err != nil {
+ return lastEvent, err
+ }
+ if done {
+ break ConditionSucceeded
+ }
+
+ case <-after:
+ return lastEvent, wait.ErrWaitTimeout
+ }
+ }
+ }
+ return lastEvent, nil
+}
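A sketch of Until combined with the FakeWatcher defined in this package's watch.go (later in this patch), assuming the vendored import paths; conditions are evaluated sequentially as described above.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

func main() {
	fw := watch.NewFake()
	go func() {
		// FakeWatcher sends block until a receiver is ready, so feed it from a goroutine.
		fw.Add(&runtime.Unknown{Raw: []byte(`{}`)})
		fw.Modify(&runtime.Unknown{Raw: []byte(`{}`)})
	}()

	// Wait (up to 5s) for an Added event followed by a Modified event.
	last, err := watch.Until(5*time.Second, fw,
		func(e watch.Event) (bool, error) { return e.Type == watch.Added, nil },
		func(e watch.Event) (bool, error) { return e.Type == watch.Modified, nil },
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(last.Type) // MODIFIED
}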
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go
new file mode 100644
index 0000000..e586527
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+ "fmt"
+
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer/streaming"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// Decoder implements the watch.Decoder interface for io.ReadClosers that
+// have contents which consist of a series of watchEvent objects encoded
+// with the given streaming decoder. The internal objects will then be
+// decoded by the embedded decoder.
+type Decoder struct {
+ decoder streaming.Decoder
+ embeddedDecoder runtime.Decoder
+}
+
+// NewDecoder creates a Decoder for the given streaming decoder and embedded object decoder.
+func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {
+ return &Decoder{
+ decoder: decoder,
+ embeddedDecoder: embeddedDecoder,
+ }
+}
+
+// Decode blocks until it can return the next object in the reader. Returns an error
+// if the reader is closed or an object can't be decoded.
+func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
+ var got Event
+ res, _, err := d.decoder.Decode(nil, &got)
+ if err != nil {
+ return "", nil, err
+ }
+ if res != &got {
+ return "", nil, fmt.Errorf("unable to decode to versioned.Event")
+ }
+ switch got.Type {
+ case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error):
+ default:
+ return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type)
+ }
+
+ obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw)
+ if err != nil {
+ return "", nil, fmt.Errorf("unable to decode watch event: %v", err)
+ }
+ return watch.EventType(got.Type), obj, nil
+}
+
+// Close closes the underlying r.
+func (d *Decoder) Close() {
+ d.decoder.Close()
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go
new file mode 100644
index 0000000..df23e0b
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+ "encoding/json"
+
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/runtime/serializer/streaming"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// Encoder serializes watch.Events into an io.Writer. The embedded objects
+// are encoded with the embedded encoder, and the outer Event is serialized
+// with the encoder.
+type Encoder struct {
+ encoder streaming.Encoder
+ embeddedEncoder runtime.Encoder
+}
+
+func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder {
+ return &Encoder{
+ encoder: encoder,
+ embeddedEncoder: embeddedEncoder,
+ }
+}
+
+// Encode writes an event to the writer. Returns an error
+// if the writer is closed or an object can't be encoded.
+func (e *Encoder) Encode(event *watch.Event) error {
+ data, err := runtime.Encode(e.embeddedEncoder, event.Object)
+ if err != nil {
+ return err
+ }
+ // FIXME: get rid of json.RawMessage.
+ return e.encoder.Encode(&Event{string(event.Type), runtime.RawExtension{Raw: json.RawMessage(data)}})
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go
new file mode 100644
index 0000000..d2576eb
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go
@@ -0,0 +1,342 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/watch/versioned/generated.proto
+// DO NOT EDIT!
+
+/*
+ Package versioned is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/pkg/watch/versioned/generated.proto
+
+ It has these top-level messages:
+ Event
+*/
+package versioned
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+
+func init() {
+ proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.watch.versioned.Event")
+}
+func (m *Event) Marshal() (data []byte, err error) {
+ size := m.Size()
+ data = make([]byte, size)
+ n, err := m.MarshalTo(data)
+ if err != nil {
+ return nil, err
+ }
+ return data[:n], nil
+}
+
+func (m *Event) MarshalTo(data []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ data[i] = 0xa
+ i++
+ i = encodeVarintGenerated(data, i, uint64(len(m.Type)))
+ i += copy(data[i:], m.Type)
+ data[i] = 0x12
+ i++
+ i = encodeVarintGenerated(data, i, uint64(m.Object.Size()))
+ n1, err := m.Object.MarshalTo(data[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ data[offset+4] = uint8(v >> 32)
+ data[offset+5] = uint8(v >> 40)
+ data[offset+6] = uint8(v >> 48)
+ data[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+ data[offset] = uint8(v)
+ data[offset+1] = uint8(v >> 8)
+ data[offset+2] = uint8(v >> 16)
+ data[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ data[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ data[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Event) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Object.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Event) Unmarshal(data []byte) error {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Event: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(data[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(data[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(data []byte) (n int, err error) {
+ l := len(data)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if data[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipGenerated(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto
new file mode 100644
index 0000000..8d55065
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.watch.versioned;
+
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "versioned";
+
+// Event represents a single event to a watched resource.
+//
+// +protobuf=true
+message Event {
+ optional string type = 1;
+
+ // Object is:
+ // * If Type is Added or Modified: the new state of the object.
+ // * If Type is Deleted: the state of the object immediately before deletion.
+ // * If Type is Error: *api.Status is recommended; other types may make sense
+ // depending on context.
+ optional k8s.io.kubernetes.pkg.runtime.RawExtension object = 2;
+}
+
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go
new file mode 100644
index 0000000..e90a021
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+ "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/conversion"
+ "k8s.io/kubernetes/pkg/runtime"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// WatchEventKind is the name reserved for serializing watch events.
+const WatchEventKind = "WatchEvent"
+
+// AddToGroupVersion registers the watch external and internal kinds with the scheme, and ensures the proper
+// conversions are in place.
+func AddToGroupVersion(scheme *runtime.Scheme, groupVersion unversioned.GroupVersion) {
+ scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &Event{})
+ scheme.AddKnownTypeWithName(
+ unversioned.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind),
+ &InternalEvent{},
+ )
+ scheme.AddConversionFuncs(
+ Convert_versioned_Event_to_watch_Event,
+ Convert_versioned_InternalEvent_to_versioned_Event,
+ Convert_watch_Event_to_versioned_Event,
+ Convert_versioned_Event_to_versioned_InternalEvent,
+ )
+}
+
+func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *Event, s conversion.Scope) error {
+ out.Type = string(in.Type)
+ switch t := in.Object.(type) {
+ case *runtime.Unknown:
+ // TODO: handle other fields on Unknown and detect type
+ out.Object.Raw = t.Raw
+ case nil:
+ default:
+ out.Object.Object = in.Object
+ }
+ return nil
+}
+
+func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *Event, s conversion.Scope) error {
+ return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s)
+}
+
+func Convert_versioned_Event_to_watch_Event(in *Event, out *watch.Event, s conversion.Scope) error {
+ out.Type = watch.EventType(in.Type)
+ if in.Object.Object != nil {
+ out.Object = in.Object.Object
+ } else if in.Object.Raw != nil {
+ // TODO: handle other fields on Unknown and detect type
+ out.Object = &runtime.Unknown{
+ Raw: in.Object.Raw,
+ ContentType: runtime.ContentTypeJSON,
+ }
+ }
+ return nil
+}
+
+func Convert_versioned_Event_to_versioned_InternalEvent(in *Event, out *InternalEvent, s conversion.Scope) error {
+ return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s)
+}
+
+// InternalEvent makes watch.Event versioned
+type InternalEvent watch.Event
+
+func (e *InternalEvent) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind }
+func (e *Event) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind }
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go
new file mode 100644
index 0000000..f8e968c
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package versioned contains the versioned types for watch. This is the first
+// serialization version unless otherwise noted.
+package versioned
+
+import (
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Event represents a single event to a watched resource.
+//
+// +protobuf=true
+type Event struct {
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+
+ // Object is:
+ // * If Type is Added or Modified: the new state of the object.
+ // * If Type is Deleted: the state of the object immediately before deletion.
+ // * If Type is Error: *api.Status is recommended; other types may make sense
+ // depending on context.
+ Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"`
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/watch.go b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/watch.go
new file mode 100644
index 0000000..96b2fe3
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/pkg/watch/watch.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+ "sync"
+
+ "k8s.io/kubernetes/pkg/runtime"
+)
+
+// Interface can be implemented by anything that knows how to watch and report changes.
+type Interface interface {
+ // Stops watching. Will close the channel returned by ResultChan(). Releases
+ // any resources used by the watch.
+ Stop()
+
+ // Returns a chan which will receive all the events. If an error occurs
+ // or Stop() is called, this channel will be closed, in which case the
+ // watch should be completely cleaned up.
+ ResultChan() <-chan Event
+}
+
+// EventType defines the possible types of events.
+type EventType string
+
+const (
+ Added EventType = "ADDED"
+ Modified EventType = "MODIFIED"
+ Deleted EventType = "DELETED"
+ Error EventType = "ERROR"
+)
+
+// Event represents a single event to a watched resource.
+type Event struct {
+ Type EventType
+
+ // Object is:
+ // * If Type is Added or Modified: the new state of the object.
+ // * If Type is Deleted: the state of the object immediately before deletion.
+ // * If Type is Error: *api.Status is recommended; other types may make sense
+ // depending on context.
+ Object runtime.Object
+}
+
+type emptyWatch chan Event
+
+// NewEmptyWatch returns a watch interface that returns no results and is closed.
+// May be used in certain error conditions where no information is available but
+// an error is not warranted.
+func NewEmptyWatch() Interface {
+ ch := make(chan Event)
+ close(ch)
+ return emptyWatch(ch)
+}
+
+// Stop implements Interface
+func (w emptyWatch) Stop() {
+}
+
+// ResultChan implements Interface
+func (w emptyWatch) ResultChan() <-chan Event {
+ return chan Event(w)
+}
+
+// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
+type FakeWatcher struct {
+ result chan Event
+ Stopped bool
+ sync.Mutex
+}
+
+func NewFake() *FakeWatcher {
+ return &FakeWatcher{
+ result: make(chan Event),
+ }
+}
+
+// Stop implements Interface.Stop().
+func (f *FakeWatcher) Stop() {
+ f.Lock()
+ defer f.Unlock()
+ if !f.Stopped {
+ close(f.result)
+ f.Stopped = true
+ }
+}
+
+// Reset prepares the watcher to be reused.
+func (f *FakeWatcher) Reset() {
+ f.Lock()
+ defer f.Unlock()
+ f.Stopped = false
+ f.result = make(chan Event)
+}
+
+func (f *FakeWatcher) ResultChan() <-chan Event {
+ return f.result
+}
+
+// Add sends an add event.
+func (f *FakeWatcher) Add(obj runtime.Object) {
+ f.result <- Event{Added, obj}
+}
+
+// Modify sends a modify event.
+func (f *FakeWatcher) Modify(obj runtime.Object) {
+ f.result <- Event{Modified, obj}
+}
+
+// Delete sends a delete event.
+func (f *FakeWatcher) Delete(lastValue runtime.Object) {
+ f.result <- Event{Deleted, lastValue}
+}
+
+// Error sends an Error event.
+func (f *FakeWatcher) Error(errValue runtime.Object) {
+ f.result <- Event{Error, errValue}
+}
+
+// Action sends an event of the requested type, for table-based testing.
+func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
+ f.result <- Event{action, obj}
+}
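A sketch of the typical consumer loop over a watch.Interface, switching on the EventType constants above and using FakeWatcher as the event source:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// consume drains a watch.Interface until its channel is closed, switching on
// the event types declared above.
func consume(w watch.Interface) {
	for ev := range w.ResultChan() {
		switch ev.Type {
		case watch.Added, watch.Modified:
			fmt.Println("upsert:", ev.Type)
		case watch.Deleted:
			fmt.Println("delete")
		case watch.Error:
			fmt.Println("error event")
		}
	}
}

func main() {
	fw := watch.NewFake()
	done := make(chan struct{})
	go func() { defer close(done); consume(fw) }()

	fw.Add(&runtime.Unknown{Raw: []byte(`{}`)})
	fw.Delete(&runtime.Unknown{Raw: []byte(`{}`)})
	fw.Stop() // closes the result channel; consume returns
	<-done
}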
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go
new file mode 100644
index 0000000..32cbb36
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gcp
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/golang/glog"
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+func init() {
+ if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
+ glog.Fatalf("Failed to register gcp auth plugin: %v", err)
+ }
+}
+
+type gcpAuthProvider struct {
+ tokenSource oauth2.TokenSource
+ persister restclient.AuthProviderConfigPersister
+}
+
+func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
+ ts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister)
+ if err != nil {
+ return nil, err
+ }
+ return &gcpAuthProvider{ts, persister}, nil
+}
+
+func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
+ return &oauth2.Transport{
+ Source: g.tokenSource,
+ Base: rt,
+ }
+}
+
+func (g *gcpAuthProvider) Login() error { return nil }
+
+type cachedTokenSource struct {
+ source oauth2.TokenSource
+ accessToken string
+ expiry time.Time
+ persister restclient.AuthProviderConfigPersister
+}
+
+func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister) (*cachedTokenSource, error) {
+ var expiryTime time.Time
+ if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil {
+ expiryTime = parsedTime
+ }
+ ts, err := google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform")
+ if err != nil {
+ return nil, err
+ }
+ return &cachedTokenSource{
+ source: ts,
+ accessToken: accessToken,
+ expiry: expiryTime,
+ persister: persister,
+ }, nil
+}
+
+func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
+ tok := &oauth2.Token{
+ AccessToken: t.accessToken,
+ TokenType: "Bearer",
+ Expiry: t.expiry,
+ }
+ if tok.Valid() && !tok.Expiry.IsZero() {
+ return tok, nil
+ }
+ tok, err := t.source.Token()
+ if err != nil {
+ return nil, err
+ }
+ if t.persister != nil {
+ cached := map[string]string{
+ "access-token": tok.AccessToken,
+ "expiry": tok.Expiry.Format(time.RFC3339Nano),
+ }
+ if err := t.persister.Persist(cached); err != nil {
+ glog.V(4).Infof("Failed to persist token: %v", err)
+ }
+ }
+ return tok, nil
+}
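The plugin above amounts to wrapping a round tripper with an oauth2.Transport backed by a cached Google token source. A standalone sketch of that same wiring, using only golang.org/x/oauth2; it assumes Application Default Credentials are available in the environment:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Same scope the plugin above requests; relies on Application Default
	// Credentials (e.g. GOOGLE_APPLICATION_CREDENTIALS) being configured.
	ts, err := google.DefaultTokenSource(context.Background(),
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		panic(err)
	}

	// Equivalent of gcpAuthProvider.WrapTransport: every request sent through
	// this client carries an "Authorization: Bearer <token>" header.
	client := &http.Client{Transport: &oauth2.Transport{
		Source: ts,
		Base:   http.DefaultTransport,
	}}
	_ = client

	tok, err := ts.Token()
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires:", tok.Expiry)
}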
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS
new file mode 100644
index 0000000..ecf3349
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS
@@ -0,0 +1,2 @@
+assignees:
+ - bobbyrullo
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go
new file mode 100644
index 0000000..690a452
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go
@@ -0,0 +1,270 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package oidc
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-oidc/jose"
+ "github.com/coreos/go-oidc/oauth2"
+ "github.com/coreos/go-oidc/oidc"
+ "github.com/golang/glog"
+
+ "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/util/wait"
+)
+
+const (
+ cfgIssuerUrl = "idp-issuer-url"
+ cfgClientID = "client-id"
+ cfgClientSecret = "client-secret"
+ cfgCertificateAuthority = "idp-certificate-authority"
+ cfgCertificateAuthorityData = "idp-certificate-authority-data"
+ cfgExtraScopes = "extra-scopes"
+ cfgIDToken = "id-token"
+ cfgRefreshToken = "refresh-token"
+)
+
+var (
+ backoff = wait.Backoff{
+ Duration: 1 * time.Second,
+ Factor: 2,
+ Jitter: .1,
+ Steps: 5,
+ }
+)
+
+func init() {
+ if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil {
+ glog.Fatalf("Failed to register oidc auth plugin: %v", err)
+ }
+}
+
+func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
+ issuer := cfg[cfgIssuerUrl]
+ if issuer == "" {
+ return nil, fmt.Errorf("Must provide %s", cfgIssuerUrl)
+ }
+
+ clientID := cfg[cfgClientID]
+ if clientID == "" {
+ return nil, fmt.Errorf("Must provide %s", cfgClientID)
+ }
+
+ clientSecret := cfg[cfgClientSecret]
+ if clientSecret == "" {
+ return nil, fmt.Errorf("Must provide %s", cfgClientSecret)
+ }
+
+ var certAuthData []byte
+ var err error
+ if cfg[cfgCertificateAuthorityData] != "" {
+ certAuthData, err = base64.StdEncoding.DecodeString(cfg[cfgCertificateAuthorityData])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ clientConfig := restclient.Config{
+ TLSClientConfig: restclient.TLSClientConfig{
+ CAFile: cfg[cfgCertificateAuthority],
+ CAData: certAuthData,
+ },
+ }
+
+ trans, err := restclient.TransportFor(&clientConfig)
+ if err != nil {
+ return nil, err
+ }
+ hc := &http.Client{Transport: trans}
+
+ providerCfg, err := oidc.FetchProviderConfig(hc, strings.TrimSuffix(issuer, "/"))
+ if err != nil {
+ return nil, fmt.Errorf("error fetching provider config: %v", err)
+ }
+
+ scopes := strings.Split(cfg[cfgExtraScopes], ",")
+ oidcCfg := oidc.ClientConfig{
+ HTTPClient: hc,
+ Credentials: oidc.ClientCredentials{
+ ID: clientID,
+ Secret: clientSecret,
+ },
+ ProviderConfig: providerCfg,
+ Scope: append(scopes, oidc.DefaultScope...),
+ }
+
+ client, err := oidc.NewClient(oidcCfg)
+ if err != nil {
+ return nil, fmt.Errorf("error creating OIDC Client: %v", err)
+ }
+
+ oClient := &oidcClient{client}
+
+ var initialIDToken jose.JWT
+ if cfg[cfgIDToken] != "" {
+ initialIDToken, err = jose.ParseJWT(cfg[cfgIDToken])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &oidcAuthProvider{
+ initialIDToken: initialIDToken,
+ refresher: &idTokenRefresher{
+ client: oClient,
+ cfg: cfg,
+ persister: persister,
+ },
+ }, nil
+}
+
+type oidcAuthProvider struct {
+ refresher *idTokenRefresher
+ initialIDToken jose.JWT
+}
+
+func (g *oidcAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
+ at := &oidc.AuthenticatedTransport{
+ TokenRefresher: g.refresher,
+ RoundTripper: rt,
+ }
+ at.SetJWT(g.initialIDToken)
+ return &roundTripper{
+ wrapped: at,
+ refresher: g.refresher,
+ }
+}
+
+func (g *oidcAuthProvider) Login() error {
+ return errors.New("not yet implemented")
+}
+
+type OIDCClient interface {
+ refreshToken(rt string) (oauth2.TokenResponse, error)
+ verifyJWT(jwt jose.JWT) error
+}
+
+type roundTripper struct {
+ refresher *idTokenRefresher
+ wrapped *oidc.AuthenticatedTransport
+}
+
+func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ var res *http.Response
+ var err error
+ firstTime := true
+ wait.ExponentialBackoff(backoff, func() (bool, error) {
+ if !firstTime {
+ var jwt jose.JWT
+ jwt, err = r.refresher.Refresh()
+ if err != nil {
+ return true, nil
+ }
+ r.wrapped.SetJWT(jwt)
+ } else {
+ firstTime = false
+ }
+
+ res, err = r.wrapped.RoundTrip(req)
+ if err != nil {
+ return true, nil
+ }
+ if res.StatusCode == http.StatusUnauthorized {
+ return false, nil
+ }
+ return true, nil
+ })
+ return res, err
+}
+
+type idTokenRefresher struct {
+ cfg map[string]string
+ client OIDCClient
+ persister restclient.AuthProviderConfigPersister
+ initialIDToken jose.JWT
+}
+
+func (r *idTokenRefresher) Verify(jwt jose.JWT) error {
+ claims, err := jwt.Claims()
+ if err != nil {
+ return err
+ }
+
+ now := time.Now()
+ exp, ok, err := claims.TimeClaim("exp")
+ switch {
+ case err != nil:
+ return fmt.Errorf("failed to parse 'exp' claim: %v", err)
+ case !ok:
+ return errors.New("missing required 'exp' claim")
+ case exp.Before(now):
+ return fmt.Errorf("token already expired at: %v", exp)
+ }
+
+ return nil
+}
+
+func (r *idTokenRefresher) Refresh() (jose.JWT, error) {
+ rt, ok := r.cfg[cfgRefreshToken]
+ if !ok {
+ return jose.JWT{}, errors.New("No valid id-token, and cannot refresh without refresh-token")
+ }
+
+ tokens, err := r.client.refreshToken(rt)
+ if err != nil {
+ return jose.JWT{}, fmt.Errorf("could not refresh token: %v", err)
+ }
+ jwt, err := jose.ParseJWT(tokens.IDToken)
+ if err != nil {
+ return jose.JWT{}, err
+ }
+
+ if tokens.RefreshToken != "" && tokens.RefreshToken != rt {
+ r.cfg[cfgRefreshToken] = tokens.RefreshToken
+ }
+ r.cfg[cfgIDToken] = jwt.Encode()
+
+ err = r.persister.Persist(r.cfg)
+ if err != nil {
+ return jose.JWT{}, fmt.Errorf("could not perist new tokens: %v", err)
+ }
+
+ return jwt, r.client.verifyJWT(jwt)
+}
+
+type oidcClient struct {
+ client *oidc.Client
+}
+
+func (o *oidcClient) refreshToken(rt string) (oauth2.TokenResponse, error) {
+ oac, err := o.client.OAuthClient()
+ if err != nil {
+ return oauth2.TokenResponse{}, err
+ }
+
+ return oac.RequestToken(oauth2.GrantTypeRefreshToken, rt)
+}
+
+func (o *oidcClient) verifyJWT(jwt jose.JWT) error {
+ return o.client.VerifyJWT(jwt)
+}
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go
new file mode 100644
index 0000000..17d3ad4
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugins
+
+import (
+ // Initialize all known client auth plugins.
+ _ "k8s.io/kubernetes/plugin/pkg/client/auth/gcp"
+ _ "k8s.io/kubernetes/plugin/pkg/client/auth/oidc"
+)
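The package above registers the auth providers purely through import side effects, so a client binary enables them with a blank import. A sketch (whether kube2msb's own main does this is not shown in this patch):

package main

// Importing the package below for its side effects registers the "gcp" and
// "oidc" auth provider plugins with restclient before any config is built.
import (
	_ "k8s.io/kubernetes/plugin/pkg/client/auth"
)

func main() {
	// Client configurations that reference the "gcp" or "oidc" auth providers
	// can now resolve them through restclient's plugin registry.
}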
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/deep_equal.go b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/deep_equal.go
new file mode 100644
index 0000000..9e45dbe
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/deep_equal.go
@@ -0,0 +1,388 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect is a fork of go's standard library reflection package, which
+// allows for deep equal with equality functions defined.
+package reflect
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Equalities is a map from type to a function comparing two values of
+// that type.
+type Equalities map[reflect.Type]reflect.Value
+
+// EqualitiesOrDie is a convenience wrapper around AddFuncs that panics on errors.
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+ e := Equalities{}
+ if err := e.AddFuncs(funcs...); err != nil {
+ panic(err)
+ }
+ return e
+}
+
+// AddFuncs is a shortcut for multiple calls to AddFunc.
+func (e Equalities) AddFuncs(funcs ...interface{}) error {
+ for _, f := range funcs {
+ if err := e.AddFunc(f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddFunc uses func as an equality function: it must take
+// two parameters of the same type, and return a boolean.
+func (e Equalities) AddFunc(eqFunc interface{}) error {
+ fv := reflect.ValueOf(eqFunc)
+ ft := fv.Type()
+ if ft.Kind() != reflect.Func {
+ return fmt.Errorf("expected func, got: %v", ft)
+ }
+ if ft.NumIn() != 2 {
+ return fmt.Errorf("expected three 'in' params, got: %v", ft)
+ }
+ if ft.NumOut() != 1 {
+ return fmt.Errorf("expected one 'out' param, got: %v", ft)
+ }
+ if ft.In(0) != ft.In(1) {
+ return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft)
+ }
+ var forReturnType bool
+ boolType := reflect.TypeOf(forReturnType)
+ if ft.Out(0) != boolType {
+ return fmt.Errorf("expected bool return, got: %v", ft)
+ }
+ e[ft.In(0)] = fv
+ return nil
+}
+
+// Below here is forked from go's reflect/deepequal.go
+
+// During deepValueEqual, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+ a1 uintptr
+ a2 uintptr
+ typ reflect.Type
+}
+
+// unexportedTypePanic is thrown when you use this DeepEqual on something that has an
+// unexported type. It indicates a programmer error, so should not occur at runtime,
+// which is why it's not public and thus impossible to catch.
+type unexportedTypePanic []reflect.Type
+
+func (u unexportedTypePanic) Error() string { return u.String() }
+func (u unexportedTypePanic) String() string {
+ strs := make([]string, len(u))
+ for i, t := range u {
+ strs[i] = fmt.Sprintf("%v", t)
+ }
+ return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ")
+}
+
+func makeUsefulPanic(v reflect.Value) {
+ if x := recover(); x != nil {
+ if u, ok := x.(unexportedTypePanic); ok {
+ u = append(unexportedTypePanic{v.Type()}, u...)
+ x = u
+ }
+ panic(x)
+ }
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+ defer makeUsefulPanic(v1)
+
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if fv, ok := e[v1.Type()]; ok {
+ return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+ }
+
+ hard := func(k reflect.Kind) bool {
+ switch k {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+ return true
+ }
+ return false
+ }
+
+ if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+ addr1 := v1.UnsafeAddr()
+ addr2 := v2.UnsafeAddr()
+ if addr1 > addr2 {
+ // Canonicalize order to reduce number of entries in visited.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are identical ...
+ if addr1 == addr2 {
+ return true
+ }
+
+ // ... or already seen
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case reflect.Array:
+ // We don't need to check length here because length is part of
+ // an array's type, which has already been filtered for.
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Slice:
+ if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+ return false
+ }
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Interface:
+ if v1.IsNil() || v2.IsNil() {
+ return v1.IsNil() == v2.IsNil()
+ }
+ return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Ptr:
+ return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Map:
+ if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+ return false
+ }
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ default:
+ // Normal equality suffices
+ if !v1.CanInterface() || !v2.CanInterface() {
+ panic(unexportedTypePanic{})
+ }
+ return v1.Interface() == v2.Interface()
+ }
+}
+
+// DeepEqual is like reflect.DeepEqual, but focused on semantic equality
+// instead of memory equality.
+//
+// It will use e's equality functions if it finds types that match.
+//
+// An empty slice *is* equal to a nil slice for our purposes; same for maps.
+//
+// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
+// function for these types.
+func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
+ if a1 == nil || a2 == nil {
+ return a1 == a2
+ }
+ v1 := reflect.ValueOf(a1)
+ v2 := reflect.ValueOf(a2)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return e.deepValueEqual(v1, v2, make(map[visit]bool), 0)
+}
+
+func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+ defer makeUsefulPanic(v1)
+
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if fv, ok := e[v1.Type()]; ok {
+ return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+ }
+
+ hard := func(k reflect.Kind) bool {
+ switch k {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+ return true
+ }
+ return false
+ }
+
+ if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+ addr1 := v1.UnsafeAddr()
+ addr2 := v2.UnsafeAddr()
+ if addr1 > addr2 {
+ // Canonicalize order to reduce number of entries in visited.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are identical ...
+ if addr1 == addr2 {
+ return true
+ }
+
+ // ... or already seen
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case reflect.Array:
+ // We don't need to check length here because length is part of
+ // an array's type, which has already been filtered for.
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Slice:
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() > v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ if v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() > v2.Len() {
+ return false
+ }
+ return v1.String() == v2.String()
+ case reflect.Interface:
+ if v1.IsNil() {
+ return true
+ }
+ return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Ptr:
+ if v1.IsNil() {
+ return true
+ }
+ return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+ case reflect.Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Map:
+ if v1.IsNil() || v1.Len() == 0 {
+ return true
+ }
+ if v1.Len() > v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ default:
+ // Normal equality suffices
+ if !v1.CanInterface() || !v2.CanInterface() {
+ panic(unexportedTypePanic{})
+ }
+ return v1.Interface() == v2.Interface()
+ }
+}
+
+// DeepDerivative is similar to DeepEqual except that unset fields in a1 are
+// ignored (not compared). This allows us to focus on the fields that matter to
+// the semantic comparison.
+//
+// The unset fields include a nil pointer and an empty string.
+func (e Equalities) DeepDerivative(a1, a2 interface{}) bool {
+ if a1 == nil {
+ return true
+ }
+ v1 := reflect.ValueOf(a1)
+ v2 := reflect.ValueOf(a2)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return e.deepValueDerive(v1, v2, make(map[visit]bool), 0)
+}
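
For reference, a minimal usage sketch of the Equalities helpers defined in this file (not part of the patch itself). It assumes the Equalities map can be populated directly with a reflect.ValueOf of a func(T, T) bool keyed by reflect.Type, and that deepValueEqual consults the registered functions the same way deepValueDerive does above; the import path is the one this tree vendors the forked package under, aliased to avoid clashing with the standard library reflect.

    package main

    import (
    	"fmt"
    	"reflect"
    	"time"

    	// Forked reflect package vendored by this patch; aliased because its package name is also "reflect".
    	forked "k8s.io/kubernetes/third_party/forked/golang/reflect"
    )

    type spec struct {
    	Labels map[string]string
    	Start  time.Time
    }

    func main() {
    	// Register a semantic comparison for time.Time so equal instants compare
    	// equal even when their Location or monotonic reading differs.
    	e := forked.Equalities{}
    	e[reflect.TypeOf(time.Time{})] = reflect.ValueOf(
    		func(a, b time.Time) bool { return a.Equal(b) },
    	)

    	now := time.Now()
    	a := spec{Labels: nil, Start: now}
    	b := spec{Labels: map[string]string{}, Start: now.In(time.UTC)}

    	fmt.Println(reflect.DeepEqual(a, b)) // false: nil vs. empty map, differing Location
    	fmt.Println(e.DeepEqual(a, b))       // true: empty == nil, time compared semantically

    	// DeepDerivative additionally ignores fields that are unset in its first
    	// argument, so a sparse "desired" value can be checked against a fully
    	// populated "observed" one.
    	desired := spec{Start: now}
    	observed := spec{Labels: map[string]string{"app": "kube2msb"}, Start: now}
    	fmt.Println(e.DeepDerivative(desired, observed)) // true
    }
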
diff --git a/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/type.go b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/type.go
new file mode 100644
index 0000000..67957ee
--- /dev/null
+++ b/src/kube2msb/vendor/k8s.io/kubernetes/third_party/forked/golang/reflect/type.go
@@ -0,0 +1,91 @@
+// This package is copied from the Go standard library's reflect/type.go.
+// The struct tag library provides no way to extract the list of struct tags,
+// only a specific tag.
+package reflect
+
+import (
+ "fmt"
+
+ "strconv"
+ "strings"
+)
+
+type StructTag struct {
+ Name string
+ Value string
+}
+
+func (t StructTag) String() string {
+ return fmt.Sprintf("%s:%q", t.Name, t.Value)
+}
+
+type StructTags []StructTag
+
+func (tags StructTags) String() string {
+ s := make([]string, 0, len(tags))
+ for _, tag := range tags {
+ s = append(s, tag.String())
+ }
+ return "`" + strings.Join(s, " ") + "`"
+}
+
+func (tags StructTags) Has(name string) bool {
+ for i := range tags {
+ if tags[i].Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// ParseStructTags returns the full set of fields in a struct tag in the order they appear in
+// the struct tag.
+func ParseStructTags(tag string) (StructTags, error) {
+ tags := StructTags{}
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ break
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+ if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
+ break
+ }
+ name := string(tag[:i])
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ break
+ }
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ return nil, err
+ }
+ tags = append(tags, StructTag{Name: name, Value: value})
+ }
+ return tags, nil
+}
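
For reference, a short sketch (not part of the patch) of the API added above. The standard library's reflect.StructTag only looks up one key at a time, whereas ParseStructTags returns every key/value pair in the order it appears in the tag; the object type and tag below are purely illustrative.

    package main

    import (
    	"fmt"
    	"reflect"

    	// Forked reflect package vendored by this patch; aliased to avoid clashing with the stdlib.
    	forked "k8s.io/kubernetes/third_party/forked/golang/reflect"
    )

    type object struct {
    	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
    }

    func main() {
    	// The stdlib hands back the raw tag string; the forked package splits it.
    	raw := string(reflect.TypeOf(object{}).Field(0).Tag)

    	tags, err := forked.ParseStructTags(raw)
    	if err != nil {
    		panic(err)
    	}
    	for _, t := range tags {
    		fmt.Printf("%s => %q\n", t.Name, t.Value)
    	}
    	// Output:
    	// json => "name,omitempty"
    	// protobuf => "bytes,1,opt,name=name"

    	fmt.Println(tags.Has("json")) // true
    	fmt.Println(tags.Has("yaml")) // false
    	fmt.Println(tags.String())    // `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
    }
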
diff --git a/src/kube2msb/vendor/vendor.json b/src/kube2msb/vendor/vendor.json
new file mode 100644
index 0000000..f6f1d8e
--- /dev/null
+++ b/src/kube2msb/vendor/vendor.json
@@ -0,0 +1,1018 @@
+{
+ "comment": "",
+ "ignore": "test",
+ "package": [
+ {
+ "checksumSHA1": "onKNQqlwxR0qasVo2UFMOxPiTT4=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/Sirupsen/logrus",
+ "path": "github.com/Sirupsen/logrus",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "4QnLdmB1kG3N+KlDd1N+G9TWAGQ=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/beorn7/perks/quantile",
+ "path": "github.com/beorn7/perks/quantile",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "pNgZdLLrOLbAc6uVni/v23H4/Rg=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/blang/semver",
+ "path": "github.com/blang/semver",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "9/TbRuVS9LYXN69EN1y+6HlTRi4=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/go-oidc/http",
+ "path": "github.com/coreos/go-oidc/http",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "8yvt1xKCgNwuuavJdxRnvaIjrIc=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/go-oidc/jose",
+ "path": "github.com/coreos/go-oidc/jose",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "KrqPjrU5qLvUvJipDZgMWkgDvgE=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/go-oidc/key",
+ "path": "github.com/coreos/go-oidc/key",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "DkhOJljW4QShU2A7R9jhUYSbeLw=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/go-oidc/oauth2",
+ "path": "github.com/coreos/go-oidc/oauth2",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "vHHHKyzYNocN9jFiqGL0pRxE/kY=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/go-oidc/oidc",
+ "path": "github.com/coreos/go-oidc/oidc",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "d50/+u/LFlXvEV10HiEoXB9OsGg=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/go-systemd/journal",
+ "path": "github.com/coreos/go-systemd/journal",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "SQm8aSsW9XF7F2NZ8vtmkB9xrSM=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/pkg/capnslog",
+ "path": "github.com/coreos/pkg/capnslog",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "vhHfyZLaugP3bKfJjnZe2+5XaHc=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/pkg/health",
+ "path": "github.com/coreos/pkg/health",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "gRHhz6qleTtTUH/UIXwGeNxBFR8=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/pkg/httputil",
+ "path": "github.com/coreos/pkg/httputil",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "etBdQ0LN6ojGunfvUt6B5C3FNrQ=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/coreos/pkg/timeutil",
+ "path": "github.com/coreos/pkg/timeutil",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "5rPfda8jFccr3A6heL+JAmi9K9g=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/davecgh/go-spew/spew",
+ "path": "github.com/davecgh/go-spew/spew",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "f1wARLDzsF/JoyN01yoxXEwFIp8=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/docker/distribution/digest",
+ "path": "github.com/docker/distribution/digest",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "PzXRTLmmqWXxmDqdIXLcRYBma18=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/docker/distribution/reference",
+ "path": "github.com/docker/distribution/reference",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "qGXit74pcrjdCsusaYiwRsC4rnQ=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/docker/go-units",
+ "path": "github.com/docker/go-units",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "SBnkw7srw/+1XzrBmCgp21yiS9c=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/emicklei/go-restful",
+ "path": "github.com/emicklei/go-restful",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "3xWz4fZ9xW+CfADpYoPFcZCYJ4E=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/emicklei/go-restful/log",
+ "path": "github.com/emicklei/go-restful/log",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "T+eylSXEakE0hRB50znBx0Azjm4=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/emicklei/go-restful/swagger",
+ "path": "github.com/emicklei/go-restful/swagger",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "17vYJDDMJO63/G/tRFIUuSlMCbE=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/ghodss/yaml",
+ "path": "github.com/ghodss/yaml",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "gcbkO3393bmFUCHR2jyGLVWInw8=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/gogo/protobuf/proto",
+ "path": "github.com/gogo/protobuf/proto",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "URsJa4y/sUUw/STmbeYx9EKqaYE=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/golang/glog",
+ "path": "github.com/golang/glog",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "GhvPGgDUBDSANMWJJLFfGbcR4C4=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/golang/protobuf/proto",
+ "path": "github.com/golang/protobuf/proto",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "lVAyYXsBkm8nojYVsA20KULd+pI=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/google/cadvisor/info/v1",
+ "path": "github.com/google/cadvisor/info/v1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "MGFnNo7z324UK7kjSC5v+G+CsCI=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/google/gofuzz",
+ "path": "github.com/google/gofuzz",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "UVO2SkAXrDSdvDhXjjpR1j/JS7M=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/imdario/mergo",
+ "path": "github.com/imdario/mergo",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "7BbrBEIA6vmz+3G4H/DJ8IoGB1M=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/jonboulle/clockwork",
+ "path": "github.com/jonboulle/clockwork",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "sKheT5xw89Tbu2Q071FQO27CVmE=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/juju/ratelimit",
+ "path": "github.com/juju/ratelimit",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Q2vw4HZBbnU8BLFt8VrzStwqSJg=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "eWLoStyA1TZgrn9XBhfWYnbpvd4=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups",
+ "path": "github.com/opencontainers/runc/libcontainer/cgroups",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "zyYrufueFSmhxd2a1yFxA+GDa/I=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs",
+ "path": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "n+zW2Rz36eb30qQTZH8tvzuz2So=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/configs",
+ "path": "github.com/opencontainers/runc/libcontainer/configs",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "wkWvZgHmwOJQBcAJgSIMafF3Z7Q=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/system",
+ "path": "github.com/opencontainers/runc/libcontainer/system",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "wCxd53xfZIkrpUhpCy7d02p5bUA=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/opencontainers/runc/libcontainer/utils",
+ "path": "github.com/opencontainers/runc/libcontainer/utils",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "3YJklSuzSE1Rt8A+2dhiWSmf/fw=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/pborman/uuid",
+ "path": "github.com/pborman/uuid",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "b+6thkf0NbbradA246HneZ4R0d0=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/prometheus/client_golang/prometheus",
+ "path": "github.com/prometheus/client_golang/prometheus",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/prometheus/client_model/go",
+ "path": "github.com/prometheus/client_model/go",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "DOkbYZAGVR8Xv/dqg5PZbsS6Aeg=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/prometheus/common/expfmt",
+ "path": "github.com/prometheus/common/expfmt",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+ "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "phCwHdzGarRNOEboPL96LZxI/Fc=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/prometheus/common/model",
+ "path": "github.com/prometheus/common/model",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "t7Qx5mp6+SwYfagYiIHVrXxe6JE=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/prometheus/procfs",
+ "path": "github.com/prometheus/procfs",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "wF4p4RxVdADjhkh7EVDJ9qa+ApA=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/spf13/pflag",
+ "path": "github.com/spf13/pflag",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "MFM0LORAlH5Hxv82QKaEJ3NJ75c=",
+ "origin": "k8s.io/kubernetes/vendor/github.com/ugorji/go/codec",
+ "path": "github.com/ugorji/go/codec",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "nJE3D2CS5WnukPIlM9wzErF7tak=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/net/context",
+ "path": "golang.org/x/net/context",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "//mZ8ad7+HBR9mpzn4OGSVNGLdI=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/net/context/ctxhttp",
+ "path": "golang.org/x/net/context/ctxhttp",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "JeoTQcWsQQLIKXmj3uXBmo3cHJs=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/net/http2",
+ "path": "golang.org/x/net/http2",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "N10BXQOqndtLNkb6PzgqWmsmtOs=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/net/http2/hpack",
+ "path": "golang.org/x/net/http2/hpack",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "UsYq0E2gsfK4Lo3BIOX5HdIW8xw=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/oauth2",
+ "path": "golang.org/x/oauth2",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "IXhcHFrMDaMK4/4DDzqM6cDHB9A=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/oauth2/google",
+ "path": "golang.org/x/oauth2/google",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "aql/i4ppXpn4JI04xMIWMk11Wz4=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/oauth2/internal",
+ "path": "golang.org/x/oauth2/internal",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "b5Qt4EG2int1Ztqg7Ru5EDDMves=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/oauth2/jws",
+ "path": "golang.org/x/oauth2/jws",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "jL08IHZGYSCX1bYoVsJFMRMag/A=",
+ "origin": "k8s.io/kubernetes/vendor/golang.org/x/oauth2/jwt",
+ "path": "golang.org/x/oauth2/jwt",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "hDPTUmuGGVz4UWKZoAoW2JTv/us=",
+ "origin": "k8s.io/kubernetes/vendor/google.golang.org/cloud/compute/metadata",
+ "path": "google.golang.org/cloud/compute/metadata",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "U7dGDNwEHORvJFMoNSXErKE7ITg=",
+ "origin": "k8s.io/kubernetes/vendor/google.golang.org/cloud/internal",
+ "path": "google.golang.org/cloud/internal",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "pfQwQtWlFezJq0Viroa/L+v+yDM=",
+ "origin": "k8s.io/kubernetes/vendor/gopkg.in/inf.v0",
+ "path": "gopkg.in/inf.v0",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "+OgOXBoiQ+X+C2dsAeiOHwBIEH0=",
+ "origin": "k8s.io/kubernetes/vendor/gopkg.in/yaml.v2",
+ "path": "gopkg.in/yaml.v2",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "LYSjIbRI9LN9xUeEUCFiqjBfVk8=",
+ "path": "k8s.io/kubernetes/pkg/api",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "4RgJA0QYB8AdYPmLyrBo/HMslGY=",
+ "path": "k8s.io/kubernetes/pkg/api/endpoints",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "hrwdrN90T4j6jPL2pkUPqdPE+p0=",
+ "path": "k8s.io/kubernetes/pkg/api/errors",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "5Gs+61jtvl+RhLuT3IQ8koTYme0=",
+ "path": "k8s.io/kubernetes/pkg/api/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Nh33IOPiITIzKiv/YcKX8zfST/s=",
+ "path": "k8s.io/kubernetes/pkg/api/meta",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "fXBrLjl8OUQCG49QpmWS/lrNP2k=",
+ "path": "k8s.io/kubernetes/pkg/api/meta/metatypes",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "XIZ5HDht1CBlCIya9XRu+amvTlQ=",
+ "path": "k8s.io/kubernetes/pkg/api/pod",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "gXu52mCaz8x0HT5rq48ARcQQ5mc=",
+ "path": "k8s.io/kubernetes/pkg/api/resource",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "FqnE+dsy79rbzCy4C6zpC+TQi5o=",
+ "path": "k8s.io/kubernetes/pkg/api/service",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "lKKHT2Etwn0ics3YeiUMebqeRPM=",
+ "path": "k8s.io/kubernetes/pkg/api/unversioned",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "WtxDahcsSH636vKzVFo6bGbMAsQ=",
+ "path": "k8s.io/kubernetes/pkg/api/unversioned/validation",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ylmbqQmJcvG9+rd99TlQAlj28Ls=",
+ "path": "k8s.io/kubernetes/pkg/api/util",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "AyTU4L6jYDDYKgHfO3yqT7ZRJZ0=",
+ "path": "k8s.io/kubernetes/pkg/api/v1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "NfszzXe4pT33+4ZogFz8g9RRvzk=",
+ "path": "k8s.io/kubernetes/pkg/api/validation",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ukVZpDt1bM0zHV9oSglatAyC7ww=",
+ "path": "k8s.io/kubernetes/pkg/apimachinery",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "rfxXviRGKfv2hRnvyBpKD35fUJU=",
+ "path": "k8s.io/kubernetes/pkg/apimachinery/registered",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "eepRyRuCUqhEELqCmM6ujh0F1qY=",
+ "path": "k8s.io/kubernetes/pkg/apis/apps",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "QmlWYvz0KOj3L94Cqm78mW1aEDI=",
+ "path": "k8s.io/kubernetes/pkg/apis/apps/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "USkOrTAMNYt1FCgzfZ9K2tSQVHE=",
+ "path": "k8s.io/kubernetes/pkg/apis/apps/v1alpha1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "O24NLtCYEIzdzm0zXduTjROtsKo=",
+ "path": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "+MenGD6qkUJ9GfJ79ni4eLmc+g8=",
+ "path": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "/kOCCl/jq51dglPzpp0oMg9gx6Y=",
+ "path": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "uvCNTDFInHQOZ4ZwoqjjQPudbFA=",
+ "path": "k8s.io/kubernetes/pkg/apis/authorization",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "91LbUzZnJJ3tdD6bNYUaH1ZYO8A=",
+ "path": "k8s.io/kubernetes/pkg/apis/authorization/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "LOtVLBtv9ohsndb6KzHl8ZHoHyI=",
+ "path": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "JJ7a2cKL7Ut7ivhbJYY4znzm8M0=",
+ "path": "k8s.io/kubernetes/pkg/apis/autoscaling",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "o+W6no9361QTD6zUqGYq9ifjSo8=",
+ "path": "k8s.io/kubernetes/pkg/apis/autoscaling/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "u36bX1648I1YY+FwyijuXg4ByNE=",
+ "path": "k8s.io/kubernetes/pkg/apis/autoscaling/v1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "CDvEPjWdkS+N/EK9YUAi8t8EPJA=",
+ "path": "k8s.io/kubernetes/pkg/apis/batch",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "3JXr8/dGpsGidItKJdgJHWFtDhM=",
+ "path": "k8s.io/kubernetes/pkg/apis/batch/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "OCY3ESN36C+JY/nvajLWjMHByow=",
+ "path": "k8s.io/kubernetes/pkg/apis/batch/v1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "OPcZZJd7NkcLTsiJGbUHGNsztDw=",
+ "path": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Ba9B89k1SN6t1OVTx+gdq1UND58=",
+ "path": "k8s.io/kubernetes/pkg/apis/certificates",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "AsUBhP9tuvZLKgn/VDUK36hlufQ=",
+ "path": "k8s.io/kubernetes/pkg/apis/certificates/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "U+rog6rTYu6CdQnRbxBS9DZdDeM=",
+ "path": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "utW0xn1wBE9MyoLwx+qWQXvCcEA=",
+ "path": "k8s.io/kubernetes/pkg/apis/componentconfig",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "GuhSIPc/YRfYY2HCksQjrsD6Yes=",
+ "path": "k8s.io/kubernetes/pkg/apis/componentconfig/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "kex+BL5gUvO5NbViltzImonRqqA=",
+ "path": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "90v8KicbPZ3J8b1ZYn/t3zUAIqw=",
+ "path": "k8s.io/kubernetes/pkg/apis/extensions",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "1BaUWEn7M84YOhRrexEGa/j9nyo=",
+ "path": "k8s.io/kubernetes/pkg/apis/extensions/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "kzh8A68C/UyHogZaGHHyEgGJ1J4=",
+ "path": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "8Lt6m6P/Pz0s85CmfzfAMnp28c0=",
+ "path": "k8s.io/kubernetes/pkg/apis/policy",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ccX4K2/P6l6JMjrMqXFT6gPtFkQ=",
+ "path": "k8s.io/kubernetes/pkg/apis/policy/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "9DOJnAqagzzPfrWslJjjDDC6KzA=",
+ "path": "k8s.io/kubernetes/pkg/apis/policy/v1alpha1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "E8kn/YoGzoofszIGq9Y6J1usm4o=",
+ "path": "k8s.io/kubernetes/pkg/apis/rbac",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "+AkNUe82PInauEwEBrdTgeIOjBA=",
+ "path": "k8s.io/kubernetes/pkg/apis/rbac/install",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "TmsZgAy7tS109HT5pHtrjMoXWbA=",
+ "path": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "LygDzYtTRWS8/uMxZw7SoybAK6o=",
+ "path": "k8s.io/kubernetes/pkg/auth/user",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "vtDlp+5ftF3XEW3BBamOivrKX8c=",
+ "path": "k8s.io/kubernetes/pkg/capabilities",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "66w7VcVxcey1J2MMz5T8pRG6EoQ=",
+ "path": "k8s.io/kubernetes/pkg/client/cache",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "S+49qCYLgRN5tYRAdDnYqmXJiYM=",
+ "path": "k8s.io/kubernetes/pkg/client/metrics",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "6tSguYQj+ccQEUlhyPNe1rntgQE=",
+ "path": "k8s.io/kubernetes/pkg/client/restclient",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "WbaK/R0QAIuUscnT9Ot8cweo79o=",
+ "path": "k8s.io/kubernetes/pkg/client/transport",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "PHGtJto8xMsgPLTRLoajKyRTMCg=",
+ "path": "k8s.io/kubernetes/pkg/client/typed/discovery",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "CRRdDKVJs34G6NmFaaHs8dGJ9Es=",
+ "path": "k8s.io/kubernetes/pkg/client/unversioned",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "wqqu/x+7SprDHiMi+CCdgdoFr6s=",
+ "path": "k8s.io/kubernetes/pkg/client/unversioned/auth",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "0yu/hrY4K5pDoYYvbXj3i6pRNxI=",
+ "path": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "+XvyrvOr0frnr3YaYlwp2mETPJs=",
+ "path": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "KAgb24ve1JJu6J/Ln7taV+J0hpU=",
+ "path": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "qak9hy+bz+bjQQ+R6SImFbsiWeo=",
+ "path": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Qb6h65gq8y1GGf8DCbR0DvcPHvM=",
+ "path": "k8s.io/kubernetes/pkg/controller/framework",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "/hDhLPJyldd7FJFi4Nju+kO7CKg=",
+ "path": "k8s.io/kubernetes/pkg/conversion",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ArJmcSrkPcyn2eHKKZ7wR/XuDfI=",
+ "path": "k8s.io/kubernetes/pkg/conversion/queryparams",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "6kHS7lz6NAn30wzWGFQzvo2qpJY=",
+ "path": "k8s.io/kubernetes/pkg/fields",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ZTrv7ecL9PAYRVIidFKu13m5XU0=",
+ "path": "k8s.io/kubernetes/pkg/kubelet/qos",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "C/8xjsAsVzwnEaRNF5dYfwL7Q/Q=",
+ "path": "k8s.io/kubernetes/pkg/labels",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "yof7pwf5Vww/thImbHvNy6Ku71Y=",
+ "path": "k8s.io/kubernetes/pkg/master/ports",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Aa5HSZQQupz38MwOZqWoZhPPenM=",
+ "path": "k8s.io/kubernetes/pkg/runtime",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "4Y9O2qVJ651NAqHUa3ndai7tQM4=",
+ "path": "k8s.io/kubernetes/pkg/runtime/serializer",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "OIOEJACuvP5xcOdSq2Z5DOJ8j7U=",
+ "path": "k8s.io/kubernetes/pkg/runtime/serializer/json",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "/Mh5Gpnchr/Dm7p+c0aDA7DGnmU=",
+ "path": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "WX3hEORekDWPUF5CbcA9aeNlpyE=",
+ "path": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Asl9MTntP4HvLJsI+3DqJv/X89g=",
+ "path": "k8s.io/kubernetes/pkg/runtime/serializer/streaming",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ADOylIMjID/v6BFwOcjzoXVipVQ=",
+ "path": "k8s.io/kubernetes/pkg/runtime/serializer/versioning",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "3rrkoyhikOAhhAuIveGppG/aPgw=",
+ "path": "k8s.io/kubernetes/pkg/types",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "gUK4Jl6V7W3S6ARXpNEIKgXYsZs=",
+ "path": "k8s.io/kubernetes/pkg/util",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Bd8C1Tu0/QhvhEBzvuKkSOzUa3w=",
+ "path": "k8s.io/kubernetes/pkg/util/crypto",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Mq9cI+ZFnUucoa2HKV6v8DlOtpQ=",
+ "path": "k8s.io/kubernetes/pkg/util/errors",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "DGn++E7mFu/SYuU7HYKalV6Dmbk=",
+ "path": "k8s.io/kubernetes/pkg/util/flowcontrol",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "hSMb2w9Ld71CkH7wA80rtLvlWKQ=",
+ "path": "k8s.io/kubernetes/pkg/util/framer",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "rPlScIrSdQJxLudKuw4QuShTZYE=",
+ "path": "k8s.io/kubernetes/pkg/util/hash",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "2mxwHk334jyQr2Qa4LP1L3+DeiE=",
+ "path": "k8s.io/kubernetes/pkg/util/homedir",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "eZnpXfc7gyoz5LgONcLIxR9+kHg=",
+ "path": "k8s.io/kubernetes/pkg/util/integer",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "V7TU2B0L0+KOCzfbXjrBVYchX5c=",
+ "path": "k8s.io/kubernetes/pkg/util/intstr",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "OxPCP9Phq7DwYZr23asBNATALRo=",
+ "path": "k8s.io/kubernetes/pkg/util/json",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "ViDH5VSxmghMux0RTzXYw3vRglY=",
+ "path": "k8s.io/kubernetes/pkg/util/net",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "EaTh3CCHMUOdK0a+71AI4+e4vfc=",
+ "path": "k8s.io/kubernetes/pkg/util/net/sets",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "Es6EIZh12NQsfEbJG1qoEWvNX4E=",
+ "path": "k8s.io/kubernetes/pkg/util/parsers",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "0gxYCwvMJvmYj5T4xmx0EOIy9jg=",
+ "path": "k8s.io/kubernetes/pkg/util/rand",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "LBEKy1pzAxA7P3nlIz4X+svQ23Q=",
+ "path": "k8s.io/kubernetes/pkg/util/runtime",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "vJOmJVEn0g7ifIUVP/6TaGE4OV4=",
+ "path": "k8s.io/kubernetes/pkg/util/sets",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "uHvQSHlfTmWqnjiB9yNmGbtef3M=",
+ "path": "k8s.io/kubernetes/pkg/util/validation",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "pXwta1HqlazLeTIdn4umB8JtDj0=",
+ "path": "k8s.io/kubernetes/pkg/util/validation/field",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "kOlRvpSkFFJ1oWdfYDCXDTz3eig=",
+ "path": "k8s.io/kubernetes/pkg/util/wait",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "aFfqTi1urwhM79HaYBgF1xAmxdw=",
+ "path": "k8s.io/kubernetes/pkg/util/yaml",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "my1qCAozaHiZjmS42EhaVYwcqVM=",
+ "path": "k8s.io/kubernetes/pkg/version",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "rxdK3CnlRBRx2CE9+BoC5Uzoh1w=",
+ "path": "k8s.io/kubernetes/pkg/watch",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "GygRzN7+/P8ios/RnxURMyoGb9M=",
+ "path": "k8s.io/kubernetes/pkg/watch/versioned",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "b7Sw/0MFLTu6wMMTyIPH77U+RuI=",
+ "path": "k8s.io/kubernetes/plugin/pkg/client/auth",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "pvLrwFh8Hc6wfjUVDFrQEL0smMQ=",
+ "path": "k8s.io/kubernetes/plugin/pkg/client/auth/gcp",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "AfLIK2y2XQPNEneyg41Z3IACIA4=",
+ "path": "k8s.io/kubernetes/plugin/pkg/client/auth/oidc",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ },
+ {
+ "checksumSHA1": "P4XQrPql22/Swxo5kmqSGFNZWEg=",
+ "path": "k8s.io/kubernetes/third_party/forked/golang/reflect",
+ "revision": "baa956b6864800c46094b68723bbe316c65035be",
+ "revisionTime": "2016-07-12T10:00:20Z"
+ }
+ ],
+ "rootPath": "kube2msb"
+}
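
For reference, the manifest above is plain JSON, so the pins it records can be inspected programmatically. A small sketch (not part of the patch) that decodes only the fields shown here, rootPath, path, and revision, and lists them; the path passed to os.Open assumes the repository root as the working directory.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"
    )

    // Minimal view of the govendor manifest; only the fields we read.
    type manifest struct {
    	RootPath string `json:"rootPath"`
    	Package  []struct {
    		Path     string `json:"path"`
    		Origin   string `json:"origin"`
    		Revision string `json:"revision"`
    	} `json:"package"`
    }

    func main() {
    	f, err := os.Open("src/kube2msb/vendor/vendor.json")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	var m manifest
    	if err := json.NewDecoder(f).Decode(&m); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%s pins %d packages\n", m.RootPath, len(m.Package))
    	for _, p := range m.Package {
    		fmt.Printf("%-60s %s\n", p.Path, p.Revision)
    	}
    }
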